def clean_email2(self):
# If the primary email checker raised an exception, the data will be gone
# from the cleaned_data structure
- if not self.cleaned_data.has_key('email'):
+ if 'email' not in self.cleaned_data:
return self.cleaned_data['email2']
email1 = self.cleaned_data['email'].lower()
email2 = self.cleaned_data['email2'].lower()
def clean_email2(self):
# If the primary email checker raised an exception, the data will be gone
# from the cleaned_data structure
- if not self.cleaned_data.has_key('email'):
+ if 'email' not in self.cleaned_data:
return self.cleaned_data['email2'].lower()
email1 = self.cleaned_data['email'].lower()
email2 = self.cleaned_data['email2'].lower()
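# A hedged sketch (not part of this patch) of why the guard above is needed:
# Django runs per-field cleaning in declaration order, and a field that fails
# validation never reaches cleaned_data, so clean_email2() cannot assume
# 'email' is present. The form and field names below are illustrative only.
from django import forms

class SignupSketchForm(forms.Form):
    email = forms.EmailField()
    email2 = forms.EmailField()

    def clean_email2(self):
        if 'email' not in self.cleaned_data:
            # The email field already failed validation; nothing to compare
            return self.cleaned_data['email2']
        if self.cleaned_data['email'].lower() != self.cleaned_data['email2'].lower():
            raise forms.ValidationError("Email addresses don't match")
        return self.cleaned_data['email2'].lower()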
redir = '{0}/account/login/{1}/'.format(settings.SITE_ROOT, provider)
oa = OAuth2Session(client_id, scope=scope, redirect_uri=redir)
- if request.GET.has_key('code'):
+ if 'code' in request.GET:
log.info("Completing {0} oauth2 step from {1}".format(provider, get_client_ip(request)))
# Receiving a login request from the provider, so validate data
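# Hedged sketch of what the branch above presumably does next, assuming
# requests_oauthlib: the ?code= parameter sent back by the provider is
# exchanged for an access token. token_url and client_secret are
# hypothetical configuration values, not taken from this patch.
def complete_oauth_sketch(request, oa, token_url, client_secret):
    if 'code' in request.GET:
        # Exchange the authorization code for an access token
        return oa.fetch_token(token_url,
                              code=request.GET['code'],
                              client_secret=client_secret)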
def oauth_login_facebook(request):
def _facebook_auth_data(oa):
r = oa.get('https://graph.facebook.com/me?fields=email,first_name,last_name').json()
- if not 'email' in r:
+ if 'email' not in r:
raise OAuthException("Your Facebook profile must provide an email address in order to log in")
return (r['email'],
def oauth_login_microsoft(request):
def _microsoft_auth_data(oa):
r = oa.get("https://apis.live.net/v5.0/me").json()
- if not 'emails' in r or not 'account' in r['emails']:
+ if 'emails' not in r or 'account' not in r['emails']:
raise OAuthException("Your Microsoft account must provide an email address in order to log in")
return (r['emails']['account'],
def value_from_datadict(self, data, files, name):
if settings.NOCAPTCHA:
return None
- if data.has_key('g-recaptcha-response'):
- return data['g-recaptcha-response']
- return None
+ return data.get('g-recaptcha-response', None)
class ReCaptchaField(forms.CharField):
@login_required
def listobjects(request, objtype):
- if not objtypes.has_key(objtype):
+ if objtype not in objtypes:
raise Http404("Object type not found")
o = objtypes[objtype]
'unapproved': o['objects'](request.user).filter(approved=False),
},
'title': o['title'],
- 'submit_header': o.has_key('submit_header') and o['submit_header'] or None,
+ 'submit_header': o.get('submit_header', None),
'suburl': objtype,
})
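# For orientation, a hedged sketch of the objtypes registry this view is
# assumed to consult, inferred from the keys used above; the model and
# lambda are illustrative only.
objtypes_sketch = {
    'news': {
        'title': 'News Article',
        # Callable returning the objects this user may moderate
        'objects': lambda user: NewsArticle.objects.filter(org__managers=user),
        # 'submit_header' is optional, hence o.get('submit_header', None)
        'submit_header': 'Please read the news submission policy first.',
    },
}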
@frame_sources('https://www.google.com/')
@transaction.atomic
def signup_oauth(request):
- if not request.session.has_key('oauth_email') \
- or not request.session.has_key('oauth_firstname') \
- or not request.session.has_key('oauth_lastname'):
+ if 'oauth_email' not in request.session \
+ or 'oauth_firstname' not in request.session \
+ or 'oauth_lastname' not in request.session:
return HttpServerError(request, 'Invalid redirect received')
if request.method == 'POST':
# Redirect to the sessions page, or to the account page
# if none was given.
return HttpResponseRedirect(request.session.pop('login_next', '/account/'))
- elif request.GET.has_key('do_abort'):
+ elif 'do_abort' in request.GET:
del request.session['oauth_email']
del request.session['oauth_firstname']
del request.session['oauth_lastname']
# "suburl" - old style way of passing parameters
# deprecated - will be removed once all sites have migrated
- if request.GET.has_key('su'):
+ if 'su' in request.GET:
su = request.GET['su']
if not su.startswith('/'):
su = None
# "data" - new style way of passing parameter, where we only
# care that it's characters are what's in base64.
- if request.GET.has_key('d'):
+ if 'd' in request.GET:
d = request.GET['d']
if d != urllib.quote_plus(d, '=$'):
# Invalid character, so drop it
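# Worked example of the quote_plus() check above: characters that survive
# url-encoding with '=' and '$' marked safe are exactly what we accept, so
# any string that quote_plus changes contains a disallowed character.
import urllib
urllib.quote_plus('aGVsbG8=', '=$')   # -> 'aGVsbG8='  unchanged, accepted
urllib.quote_plus('a b&c', '=$')      # -> 'a+b%26c'   changed, dropped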
site = get_object_or_404(CommunityAuthSite, pk=siteid)
q = Q(is_active=True)
- if request.GET.has_key('s') and request.GET['s']:
+ if 's' in request.GET and request.GET['s']:
# General search term, match both name and email
q = q & (Q(email__icontains=request.GET['s']) | Q(first_name__icontains=request.GET['s']) | Q(last_name__icontains=request.GET['s']))
- elif request.GET.has_key('e') and request.GET['e']:
+ elif 'e' in request.GET and request.GET['e']:
q = q & Q(email__icontains=request.GET['e'])
- elif request.GET.has_key('n') and request.GET['n']:
+ elif 'n' in request.GET and request.GET['n']:
q = q & (Q(first_name__icontains=request.GET['n']) | Q(last_name__icontains=request.GET['n']))
- elif request.GET.has_key('u') and request.GET['u']:
+ elif 'u' in request.GET and request.GET['u']:
q = q & Q(username=request.GET['u'])
else:
raise Http404('No search term specified')
def save(self, commit=True):
model = super(OrganisationForm, self).save(commit=False)
- if self.cleaned_data.has_key('add_manager') and self.cleaned_data['add_manager']:
+ if 'add_manager' in self.cleaned_data and self.cleaned_data['add_manager']:
model.managers.add(User.objects.get(email=self.cleaned_data['add_manager'].lower()))
- if self.cleaned_data.has_key('remove_manager') and self.cleaned_data['remove_manager']:
+ if 'remove_manager' in self.cleaned_data and self.cleaned_data['remove_manager']:
for toremove in self.cleaned_data['remove_manager']:
model.managers.remove(toremove)
@cache(hours=6)
def dynamic_css(request, css):
- if not _dynamic_cssmap.has_key(css):
+ if css not in _dynamic_cssmap:
raise Http404('CSS not found')
files = _dynamic_cssmap[css]
resp = HttpResponse(content_type='text/css')
# If we somehow referred to a file that didn't exist, or
# one that we couldn't access.
raise Http404('CSS (sub) not found')
- if request.META.has_key('HTTP_IF_MODIFIED_SINCE'):
+ if 'HTTP_IF_MODIFIED_SINCE' in request.META:
# This code is mostly stolen from django :)
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$",
request.META.get('HTTP_IF_MODIFIED_SINCE'),
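# Worked example of the regex above: some browsers (old IE) append a
# "; length=" suffix to If-Modified-Since, which the optional second
# group captures so the date in group 1 stays cleanly parseable.
import re
m = re.match(r"^([^;]+)(; length=([0-9]+))?$",
             "Sat, 12 Feb 2011 17:38:44 GMT; length=1024")
m.group(1)   # -> 'Sat, 12 Feb 2011 17:38:44 GMT'
m.group(3)   # -> '1024' (None when the suffix is absent)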
parent = ''
for d in subpath.split('/'):
# Check if allnodes contains a node matching the path
- if allnodes[parent].has_key(d):
+ if d in allnodes[parent]:
if allnodes[parent][d]['t'] == 'd':
canonpath = os.path.join(canonpath, d)
elif allnodes[parent][d]['t'] == 'l':
breadcrumbs.append({'name': pathpiece, 'path': breadroot})
# Check if there are any "content files" we should render directly on the webpage
- file_readme = (node.has_key('README') and node['README']['t'] == 'f') and node['README']['c'] or None
- file_message = (node.has_key('.message') and node['.message']['t'] == 'f') and node['.message']['c'] or None
- file_maintainer = (node.has_key('CURRENT_MAINTAINER') and node['CURRENT_MAINTAINER']['t'] == 'f') and node['CURRENT_MAINTAINER']['c'] or None
+ file_readme = node['README']['c'] if 'README' in node and node['README']['t'] == 'f' else None
+ file_message = node['.message']['c'] if '.message' in node and node['.message']['t'] == 'f' else None
+ file_maintainer = node['CURRENT_MAINTAINER']['c'] if 'CURRENT_MAINTAINER' in node and node['CURRENT_MAINTAINER']['t'] == 'f' else None
del node
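# For orientation, a hedged sketch of the node structure these lines assume:
# each entry maps a name to a dict whose 't' key is the type ('d'irectory,
# 'l'ink or 'f'ile) and whose 'c' key, for files rendered inline, holds the
# file contents.
node_sketch = {
    'README': {'t': 'f', 'c': 'Contents of the README file...'},
    'v9.6': {'t': 'd'},
    'latest': {'t': 'l'},
}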
if self.instance.pk and self.instance.approved:
if self.cleaned_data['enddate'] != self.instance.enddate:
raise ValidationError("You cannot change the dates on events that have been approved")
- if self.cleaned_data.has_key('startdate') and self.cleaned_data['enddate'] < self.cleaned_data['startdate']:
+ if 'startdate' in self.cleaned_data and self.cleaned_data['enddate'] < self.cleaned_data['startdate']:
raise ValidationError("End date cannot be before start date!")
return self.cleaned_data['enddate']
# constants that we might eventually want to make configurable
hitsperpage = 20
- if request.GET.has_key('m') and request.GET['m'] == '1':
+ if request.GET.get('m', '') == '1':
searchlists = True
- if request.GET.has_key('l'):
- if request.GET['l'] != '':
- try:
- listid = int(request.GET['l'])
- except:
- listid = None
- else:
+ if request.GET.get('l', '') != '':
+ try:
+ listid = int(request.GET['l'])
+ except ValueError:
listid = None
else:
# Listid not specified. But do we have the name?
- if request.GET.has_key('ln'):
+ if 'ln' in request.GET:
try:
ll = MailingList.objects.get(listname=request.GET['ln'])
listid = ll.id
else:
listid = None
- if request.GET.has_key('d'):
+ if 'd' in request.GET:
try:
dateval = int(request.GET['d'])
except:
else:
dateval = None
- if request.GET.has_key('s'):
+ if 's' in request.GET:
listsort = request.GET['s']
- if not listsort in ('r', 'd', 'i'):
+ if listsort not in ('r', 'd', 'i'):
listsort = 'r'
else:
listsort = 'r'
dateval = 365
sortoptions = (
- {'val': 'r', 'text': 'Rank', 'selected': not (request.GET.has_key('s') and request.GET['s'] == 'd')},
- {'val': 'd', 'text': 'Date', 'selected': request.GET.has_key('s') and request.GET['s'] == 'd'},
- {'val': 'i', 'text': 'Reverse date', 'selected': request.GET.has_key('s') and request.GET['s'] == 'i'},
+ {'val': 'r', 'text': 'Rank', 'selected': request.GET.get('s', '') not in ('d', 'i')},
+ {'val': 'd', 'text': 'Date', 'selected': request.GET.get('s', '') == 'd'},
+ {'val': 'i', 'text': 'Reverse date', 'selected': request.GET.get('s', '') == 'i'},
)
dateoptions = (
{'val': -1, 'text': 'anytime'},
)
else:
searchlists = False
- if request.GET.has_key('u'):
- suburl = request.GET['u']
- else:
- suburl = None
-
- if request.GET.has_key('a'):
- allsites = (request.GET['a'] == "1")
- else:
- allsites = False
+ suburl = request.GET.get('u', None)
+ allsites = request.GET.get('a') == "1"
# Check that we actually have something to search for
- if not request.GET.has_key('q') or request.GET['q'] == '':
+ if request.GET.get('q', '') == '':
if searchlists:
return render(request, 'search/listsearch.html', {
'search_error': "No search term specified.",
})
# Is the request being paged?
- if request.GET.has_key('p'):
- try:
- pagenum = int(request.GET['p'])
- except:
- pagenum = 1
- else:
+ try:
+ pagenum = int(request.GET.get('p', 1))
+ except ValueError:
pagenum = 1
firsthit = (pagenum - 1) * hitsperpage + 1
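# Worked example of the pagination arithmetic above, with hitsperpage = 20:
# page 1 asks the search backend for hits 1..20, page 3 for hits 41..60.
pagenum = 3
firsthit = (pagenum - 1) * 20 + 1   # -> 41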
try:
for vector in val.split('/'):
k, v = vector.split(':')
- if not cvss.constants3.METRICS_VALUES.has_key(k):
+ if k not in cvss.constants3.METRICS_VALUES:
raise ValidationError("Metric {0} is unknown".format(k))
if k in ('AV', 'AC', 'PR', 'UI', 'S', 'C', 'I', 'A'):
raise ValidationError("Metric {0} must be specified in the dropdowns".format(k))
- if not cvss.constants3.METRICS_VALUES[k].has_key(v):
+ if v not in cvss.constants3.METRICS_VALUES[k]:
raise ValidationError("Metric {0} has unknown value {1}. Valid ones are: {2}".format(
k, v,
", ".join(cvss.constants3.METRICS_VALUES[k].keys()),
def change_view(self, request, object_id, form_url='', extra_context=None):
if hasattr(self.model, 'send_notification') and self.model.send_notification:
# Anything that sends notification supports manual notifications
- if extra_context == None:
+ if extra_context is None:
extra_context = dict()
extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date')
if change and hasattr(self.model, 'send_notification') and self.model.send_notification:
# We only do processing if something changed, not when adding
# a new object.
- if request.POST.has_key('new_notification') and request.POST['new_notification']:
+ if 'new_notification' in request.POST and request.POST['new_notification']:
# Need to send off a new notification. We'll also store
# it in the database for future reference, of course.
if not obj.org.email:
def get_nav_menu(section):
- if sitenav.has_key(section):
+ if section in sitenav:
return sitenav[section]
else:
return {}
or behind one of our SSL proxies, make sure to get the *actual* client IP,
and not the IP of the cache/proxy.
"""
- if request.META.has_key('HTTP_X_FORWARDED_FOR'):
+ if 'HTTP_X_FORWARDED_FOR' in request.META:
# There is an X-Forwarded-For header, so trust it, but only if the actual connection
# is coming in from one of our frontends.
if request.META['REMOTE_ADDR'] in settings.FRONTEND_SERVERS:
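# A minimal sketch of the complete lookup, assuming settings.FRONTEND_SERVERS
# lists the trusted proxies: the header is only believed when the connection
# really comes from a frontend, and the first address in the chain is the
# original client.
def get_client_ip_sketch(request):
    if 'HTTP_X_FORWARDED_FOR' in request.META:
        if request.META['REMOTE_ADDR'] in settings.FRONTEND_SERVERS:
            return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
    return request.META['REMOTE_ADDR']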
else:
# Include all field names except specified ones,
# that are local to this model (not auto created)
- return [f.name for f in obj._meta.get_fields() if not f.name in ('approved', 'submitter', 'id', ) and not f.auto_created]
+ return [f.name for f in obj._meta.get_fields() if f.name not in ('approved', 'submitter', 'id', ) and not f.auto_created]
def _get_attr_value(obj, fieldname):
from django.contrib.auth.views import login
return login(request, template_name='admin.html')
- if request.GET.has_key('next'):
+ if 'next' in request.GET:
# Put together a url-encoded dict of parameters we're getting back,
# including a small nonce at the beginning to make sure it doesn't
# encrypt the same way every time.
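# Hedged sketch of the encryption step described above, mirroring the
# PyCrypto decryption code later in this patch (AES-CBC keyed on a SHA-1
# digest of the shared secret). The random IV, plus the nonce folded into
# the payload, keeps identical parameters from encrypting identically.
import base64, os, time, urllib
from Crypto.Cipher import AES
from Crypto.Hash import SHA

def encrypt_params_sketch(params, secret):
    s = urllib.urlencode(dict(params, nonce=int(time.time())))
    s += ' ' * (16 - len(s) % 16)   # pad to the AES block size
    iv = os.urandom(16)
    enc = AES.new(SHA.new(secret).digest()[:16], AES.MODE_CBC, iv)
    return (base64.urlsafe_b64encode(iv),
            base64.urlsafe_b64encode(enc.encrypt(s)))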
# Receive an authentication response from the main website and try
# to log the user in.
def auth_receive(request):
- if request.GET.has_key('s') and request.GET['s'] == "logout":
+ if 's' in request.GET and request.GET['s'] == "logout":
# This was a logout request
return HttpResponseRedirect('/')
- if not request.GET.has_key('i'):
+ if 'i' not in request.GET:
return HttpResponse("Missing IV in url!", status=400)
- if not request.GET.has_key('d'):
+ if 'd' not in request.GET:
return HttpResponse("Missing data in url!", status=400)
# Set up an AES object and decrypt the data we received
# Finally, check if we have a data package that tells us where to
# redirect the user.
- if data.has_key('d'):
+ if 'd' in data:
(ivs, datas) = data['d'][0].split('$')
decryptor = AES.new(SHA.new(settings.SECRET_KEY).digest()[:16],
AES.MODE_CBC,
rdata = urlparse.parse_qs(s, strict_parsing=True)
except ValueError:
return HttpResponse("Invalid encrypted data received.", status=400)
- if rdata.has_key('r'):
+ if 'r' in rdata:
# Redirect address
return HttpResponseRedirect(rdata['r'][0])
# No redirect specified, see if we have it in our settings
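# Note on the [0] indexing above: parse_qs returns a list for every key,
# because query-string parameters can legally repeat. Worked example:
import urlparse
urlparse.parse_qs('r=%2Faccount%2F&s=logout', strict_parsing=True)
# -> {'r': ['/account/'], 's': ['logout']}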
found = False
for p, pinfo in platforms.items():
if pinfo['p'] == familypath and pinfo['f'] == shortdist:
- if not reporpms[v].has_key(p):
+ if p not in reporpms[v]:
reporpms[v][p] = {}
reporpms[v][p][arch] = max(ver, reporpms[v][p].get(arch, 0))
platforms[p]['found'] = True
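# Worked example of the accumulation above: reporpms is a nested dict of
# version -> platform -> arch -> newest rpm version seen, and .get(arch, 0)
# seeds the max() comparison the first time an arch shows up.
reporpms = {'10': {}}
for p, arch, ver in [('el7', 'x86_64', 2), ('el7', 'x86_64', 5)]:
    if p not in reporpms['10']:
        reporpms['10'][p] = {}
    reporpms['10'][p][arch] = max(ver, reporpms['10'][p].get(arch, 0))
# reporpms is now {'10': {'el7': {'x86_64': 5}}}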
for l in sys.stdin:
if l.startswith('templates/'):
tmpl = l[len('templates/'):].strip()
- if not tmpl in BANNED_TEMPLATES:
+ if tmpl not in BANNED_TEMPLATES:
curs.execute("SELECT varnish_purge_xkey(%(key)s)", {
'key': 'pgwt_{0}'.format(hashlib.md5(tmpl).hexdigest()),
})
'month': d.month,
})
x = curs.fetchall()
- if x[0][0] != None:
+ if x[0][0] is not None:
maxmsg = x[0][0]
else:
maxmsg = -1
return False
def crawl_page(self, url, relprio, internal):
- if self.pages_crawled.has_key(url) or self.pages_crawled.has_key(url + "/"):
+ if url in self.pages_crawled or url + "/" in self.pages_crawled:
return
if self.exclude_url(url):
(result, pagedata, lastmod) = self.fetch_page(url)
if result == 0:
- if pagedata == None:
+ if pagedata is None:
# Result OK but no data means the page was not modified.
# Thus we can happily consider ourselves done here.
return
h.putrequest("GET", url)
h.putheader("User-agent", "pgsearch/0.2")
h.putheader("Connection", "close")
- if self.scantimes.has_key(url):
+ if url in self.scantimes:
h.putheader("If-Modified-Since", formatdate(time.mktime(self.scantimes[url].timetuple())))
h.endheaders()
resp = h.getresponse()
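# Hedged sketch of how the response to this conditional GET is presumably
# consumed (the rest of fetch_page is elided from this hunk): a 304 means
# the stored copy is current, which surfaces as the pagedata == None case
# crawl_page treats as "done" above.
def handle_response_sketch(resp):
    if resp.status == 304:
        return (0, None, None)            # not modified since last scan
    if resp.status == 200:
        return (0, resp.read(), resp.getheader('last-modified'))
    return (1, None, None)                # error; let the caller decide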
return datetime.datetime.now()
def parse_html(self, page):
- if page == None:
+ if page is None:
return None
p = GenericHtmlParser()
def post_process_page(self, url):
for l in self.resolve_links(self.page.links, url):
- if self.pages_crawled.has_key(l) or self.pages_crawled.has_key(l + "/"):
+ if l in self.pages_crawled or l + "/" in self.pages_crawled:
continue
if self.exclude_url(l):
continue
# Advance 8 characters - length of https://.
url = url[len(self.hostname) + 8:]
if lastmod:
- if self.scantimes.has_key(url):
+ if url in self.scantimes:
if lastmod < self.scantimes[url]:
# Not modified since last scan, so don't reload
# Stick it in the list of pages we've scanned though,