Fix deprecated comparison constructs
author     Magnus Hagander <magnus@hagander.net>
           Thu, 17 Jan 2019 20:19:57 +0000 (21:19 +0100)
committer  Magnus Hagander <magnus@hagander.net>
           Thu, 17 Jan 2019 20:19:57 +0000 (21:19 +0100)
21 files changed:
pgweb/account/forms.py
pgweb/account/oauthclient.py
pgweb/account/recaptcha.py
pgweb/account/views.py
pgweb/core/forms.py
pgweb/core/views.py
pgweb/downloads/views.py
pgweb/events/forms.py
pgweb/search/views.py
pgweb/security/models.py
pgweb/util/admin.py
pgweb/util/contexts.py
pgweb/util/misc.py
pgweb/util/signals.py
tools/communityauth/sample/django/auth.py
tools/ftp/spider_yum.py
tools/purgehook/purgehook.py
tools/search/crawler/lib/archives.py
tools/search/crawler/lib/basecrawler.py
tools/search/crawler/lib/genericsite.py
tools/search/crawler/lib/sitemapsite.py

diff --git a/pgweb/account/forms.py b/pgweb/account/forms.py
index a1652c71bebe26da141c027669c34d688362e09f..712b51421b8e1b05b9295ada78ff163751ce22f3 100644
@@ -67,7 +67,7 @@ class SignupForm(forms.Form):
     def clean_email2(self):
         # If the primary email checker had an exception, the data will be gone
         # from the cleaned_data structure
-        if not self.cleaned_data.has_key('email'):
+        if 'email' not in self.cleaned_data:
             return self.cleaned_data['email2']
         email1 = self.cleaned_data['email'].lower()
         email2 = self.cleaned_data['email2'].lower()
@@ -157,7 +157,7 @@ class ChangeEmailForm(forms.Form):
     def clean_email2(self):
         # If the primary email checker had an exception, the data will be gone
         # from the cleaned_data structure
-        if not self.cleaned_data.has_key('email'):
+        if 'email' not in self.cleaned_data:
             return self.cleaned_data['email2'].lower()
         email1 = self.cleaned_data['email'].lower()
         email2 = self.cleaned_data['email2'].lower()
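
The change above is the pattern this commit applies throughout: dict.has_key() was removed in Python 3, while the "in" operator (available for dicts since Python 2.2) works in both versions. A minimal standalone sketch of the equivalence, using hypothetical data rather than pgweb code:

    cleaned_data = {'email2': 'user@example.com'}

    # Old spelling, removed in Python 3:
    #   if not cleaned_data.has_key('email'): ...
    # Portable spelling, used by this commit:
    if 'email' not in cleaned_data:
        print(cleaned_data['email2'])
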
diff --git a/pgweb/account/oauthclient.py b/pgweb/account/oauthclient.py
index 432a5188efcfaeee5ebdebd2b6e0767a0ac860d1..a287081dd95397d28d5c89c7fe1ac4cd6a52622c 100644
@@ -26,7 +26,7 @@ def _login_oauth(request, provider, authurl, tokenurl, scope, authdatafunc):
     redir = '{0}/account/login/{1}/'.format(settings.SITE_ROOT, provider)
 
     oa = OAuth2Session(client_id, scope=scope, redirect_uri=redir)
-    if request.GET.has_key('code'):
+    if 'code' in request.GET:
         log.info("Completing {0} oauth2 step from {1}".format(provider, get_client_ip(request)))
 
         # Receiving a login request from the provider, so validate data
@@ -151,7 +151,7 @@ def oauth_login_github(request):
 def oauth_login_facebook(request):
     def _facebook_auth_data(oa):
         r = oa.get('https://graph.facebook.com/me?fields=email,first_name,last_name').json()
-        if not 'email' in r:
+        if 'email' not in r:
             raise OAuthException("Your Facebook profile must provide an email address in order to log in")
 
         return (r['email'],
@@ -174,7 +174,7 @@ def oauth_login_facebook(request):
 def oauth_login_microsoft(request):
     def _microsoft_auth_data(oa):
         r = oa.get("https://apis.live.net/v5.0/me").json()
-        if not 'emails' in r or not 'account' in r['emails']:
+        if 'emails' not in r or 'account' not in r['emails']:
             raise OAuthException("Your Facebook profile must provide an email address in order to log in")
 
         return (r['emails']['account'],
diff --git a/pgweb/account/recaptcha.py b/pgweb/account/recaptcha.py
index e86bd2a836e6c1e5a527ffff998905d1a305b2ff..f21df56a287044df45e45ae3ae5a530d77acd412 100644
@@ -25,9 +25,7 @@ class ReCaptchaWidget(forms.widgets.Widget):
     def value_from_datadict(self, data, files, name):
         if settings.NOCAPTCHA:
             return None
-        if data.has_key('g-recaptcha-response'):
-            return data['g-recaptcha-response']
-        return None
+        return data.get('g-recaptcha-response', None)
 
 
 class ReCaptchaField(forms.CharField):
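
Here the three-line membership check collapses into a single dict.get() call, which returns the supplied default when the key is absent. A minimal sketch with hypothetical data:

    data = {'g-recaptcha-response': 'token123'}

    # Equivalent to: if the key is present, return its value; otherwise return None.
    value = data.get('g-recaptcha-response', None)  # 'token123' here, None if the key is missing

Since None is already the default for get(), data.get('g-recaptcha-response') would behave identically; the explicit second argument just makes the fallback visible.
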
diff --git a/pgweb/account/views.py b/pgweb/account/views.py
index a3f37e648d8cec7fce7b6662c5a3f05b769a71a4..dde8967d3f307ac19407fbf505a789a810c5003b 100644
@@ -204,7 +204,7 @@ def confirm_change_email(request, tokenhash):
 
 @login_required
 def listobjects(request, objtype):
-    if not objtypes.has_key(objtype):
+    if objtype not in objtypes:
         raise Http404("Object type not found")
     o = objtypes[objtype]
 
@@ -214,7 +214,7 @@ def listobjects(request, objtype):
             'unapproved': o['objects'](request.user).filter(approved=False),
         },
         'title': o['title'],
-        'submit_header': o.has_key('submit_header') and o['submit_header'] or None,
+        'submit_header': o.get('submit_header', None),
         'suburl': objtype,
     })
 
@@ -375,9 +375,9 @@ def signup_complete(request):
 @frame_sources('https://www.google.com/')
 @transaction.atomic
 def signup_oauth(request):
-    if not request.session.has_key('oauth_email') \
-       or not request.session.has_key('oauth_firstname') \
-       or not request.session.has_key('oauth_lastname'):
+    if 'oauth_email' not in request.session \
+       or 'oauth_firstname' not in request.session \
+       or 'oauth_lastname' not in request.session:
         return HttpServerError(request, 'Invalid redirect received')
 
     if request.method == 'POST':
@@ -413,7 +413,7 @@ def signup_oauth(request):
             # Redirect to the sessions page, or to the account page
             # if none was given.
             return HttpResponseRedirect(request.session.pop('login_next', '/account/'))
-    elif request.GET.has_key('do_abort'):
+    elif 'do_abort' in request.GET:
         del request.session['oauth_email']
         del request.session['oauth_firstname']
         del request.session['oauth_lastname']
@@ -459,7 +459,7 @@ def communityauth(request, siteid):
 
     # "suburl" - old style way of passing parameters
     # deprecated - will be removed once all sites have migrated
-    if request.GET.has_key('su'):
+    if 'su' in request.GET:
         su = request.GET['su']
         if not su.startswith('/'):
             su = None
@@ -468,7 +468,7 @@ def communityauth(request, siteid):
 
     # "data" - new style way of passing parameter, where we only
     # care that its characters are what's in base64.
-    if request.GET.has_key('d'):
+    if 'd' in request.GET:
         d = request.GET['d']
         if d != urllib.quote_plus(d, '=$'):
             # Invalid character, so drop it
@@ -608,14 +608,14 @@ def communityauth_search(request, siteid):
     site = get_object_or_404(CommunityAuthSite, pk=siteid)
 
     q = Q(is_active=True)
-    if request.GET.has_key('s') and request.GET['s']:
+    if 's' in request.GET and request.GET['s']:
         # General search term, match both name and email
         q = q & (Q(email__icontains=request.GET['s']) | Q(first_name__icontains=request.GET['s']) | Q(last_name__icontains=request.GET['s']))
-    elif request.GET.has_key('e') and request.GET['e']:
+    elif 'e' in request.GET and request.GET['e']:
         q = q & Q(email__icontains=request.GET['e'])
-    elif request.GET.has_key('n') and request.GET['n']:
+    elif 'n' in request.GET and request.GET['n']:
         q = q & (Q(first_name__icontains=request.GET['n']) | Q(last_name__icontains=request.GET['n']))
-    elif request.GET.has_key('u') and request.GET['u']:
+    elif 'u' in request.GET and request.GET['u']:
         q = q & Q(username=request.GET['u'])
     else:
         raise Http404('No search term specified')
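
One subtlety in the listobjects hunk: the old "o.has_key('submit_header') and o['submit_header'] or None" idiom and the new o.get('submit_header', None) are not strictly identical. The and/or form also yields None when the key exists but holds a falsy value such as an empty string, while get() returns the stored value. A sketch of the difference with a hypothetical dict:

    o = {'submit_header': ''}

    old = ('submit_header' in o and o['submit_header']) or None  # None, because '' is falsy
    new = o.get('submit_header', None)                           # '', the stored value

For a template context the two render the same, which is presumably why the simpler get() form was considered safe here.
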
diff --git a/pgweb/core/forms.py b/pgweb/core/forms.py
index ac0716c43cc6173d38cd5d635572adfb6c36ec89..3c749a8f6e658dbda2861b087c2bc88fbfc880ab 100644
@@ -44,9 +44,9 @@ class OrganisationForm(forms.ModelForm):
 
     def save(self, commit=True):
         model = super(OrganisationForm, self).save(commit=False)
-        if self.cleaned_data.has_key('add_manager') and self.cleaned_data['add_manager']:
+        if 'add_manager' in self.cleaned_data and self.cleaned_data['add_manager']:
             model.managers.add(User.objects.get(email=self.cleaned_data['add_manager'].lower()))
-        if self.cleaned_data.has_key('remove_manager') and self.cleaned_data['remove_manager']:
+        if 'remove_manager' in self.cleaned_data and self.cleaned_data['remove_manager']:
             for toremove in self.cleaned_data['remove_manager']:
                 model.managers.remove(toremove)
 
diff --git a/pgweb/core/views.py b/pgweb/core/views.py
index f307dd6092481307613b6ef73973769aa3ed4215..5b44328f10ede65236f33ac1f8bfe8b082a0faf5 100644
@@ -205,7 +205,7 @@ _dynamic_cssmap = {
 
 @cache(hours=6)
 def dynamic_css(request, css):
-    if not _dynamic_cssmap.has_key(css):
+    if css not in _dynamic_cssmap:
         raise Http404('CSS not found')
     files = _dynamic_cssmap[css]
     resp = HttpResponse(content_type='text/css')
@@ -222,7 +222,7 @@ def dynamic_css(request, css):
             # If we somehow referred to a file that didn't exist, or
             # one that we couldn't access.
             raise Http404('CSS (sub) not found')
-    if request.META.has_key('HTTP_IF_MODIFIED_SINCE'):
+    if 'HTTP_IF_MODIFIED_SINCE' in request.META:
         # This code is mostly stolen from django :)
         matches = re.match(r"^([^;]+)(; length=([0-9]+))?$",
                            request.META.get('HTTP_IF_MODIFIED_SINCE'),
diff --git a/pgweb/downloads/views.py b/pgweb/downloads/views.py
index 7ccdbb3a555dc418b04d14f41e8f78f330d1f6a3..deb29593e2288270992a226219658ff95b72bb92 100644
@@ -50,7 +50,7 @@ def ftpbrowser(request, subpath):
         parent = ''
         for d in subpath.split('/'):
             # Check if allnodes contains a node matching the path
-            if allnodes[parent].has_key(d):
+            if d in allnodes[parent]:
                 if allnodes[parent][d]['t'] == 'd':
                     canonpath = os.path.join(canonpath, d)
                 elif allnodes[parent][d]['t'] == 'l':
@@ -102,9 +102,9 @@ def ftpbrowser(request, subpath):
             breadcrumbs.append({'name': pathpiece, 'path': breadroot})
 
     # Check if there are any "content files" we should render directly on the webpage
-    file_readme = (node.has_key('README') and node['README']['t'] == 'f') and node['README']['c'] or None
-    file_message = (node.has_key('.message') and node['.message']['t'] == 'f') and node['.message']['c'] or None
-    file_maintainer = (node.has_key('CURRENT_MAINTAINER') and node['CURRENT_MAINTAINER']['t'] == 'f') and node['CURRENT_MAINTAINER']['c'] or None
+    file_readme = ('README' in node and node['README']['t'] == 'f') and node['README']['c'] or None
+    file_message = ('.message' in node and node['.message']['t'] == 'f') and node['.message']['c'] or None
+    file_maintainer = ('CURRENT_MAINTAINER' in node and node['CURRENT_MAINTAINER']['t'] == 'f') and node['CURRENT_MAINTAINER']['c'] or None
 
     del node
 
diff --git a/pgweb/events/forms.py b/pgweb/events/forms.py
index 6522723776b4de4fb95bccaafbf8bddbcad0e068..05ff51254ebb4363d521418d637086cae6601300 100644
@@ -43,7 +43,7 @@ class EventForm(forms.ModelForm):
         if self.instance.pk and self.instance.approved:
             if self.cleaned_data['enddate'] != self.instance.enddate:
                 raise ValidationError("You cannot change the dates on events that have been approved")
-        if self.cleaned_data.has_key('startdate') and self.cleaned_data['enddate'] < self.cleaned_data['startdate']:
+        if 'startdate' in self.cleaned_data and self.cleaned_data['enddate'] < self.cleaned_data['startdate']:
             raise ValidationError("End date cannot be before start date!")
         return self.cleaned_data['enddate']
 
diff --git a/pgweb/search/views.py b/pgweb/search/views.py
index 65add875b7e161aadac930bfc57b819f133f34d7..2c2ee870350deade06fe6c65ac1b835f13c91535 100644
@@ -60,20 +60,17 @@ def search(request):
     # constants that we might eventually want to make configurable
     hitsperpage = 20
 
-    if request.GET.has_key('m') and request.GET['m'] == '1':
+    if request.GET.get('m', '') == '1':
         searchlists = True
 
-        if request.GET.has_key('l'):
-            if request.GET['l'] != '':
-                try:
-                    listid = int(request.GET['l'])
-                except:
-                    listid = None
-            else:
+        if request.GET.get('l', '') != '':
+            try:
+                listid = int(request.GET['l'])
+            except:
                 listid = None
         else:
             # Listid not specified. But do we have the name?
-            if request.GET.has_key('ln'):
+            if 'ln' in request.GET:
                 try:
                     ll = MailingList.objects.get(listname=request.GET['ln'])
                     listid = ll.id
@@ -84,7 +81,7 @@ def search(request):
             else:
                 listid = None
 
-        if request.GET.has_key('d'):
+        if 'd' in request.GET:
             try:
                 dateval = int(request.GET['d'])
             except:
@@ -92,9 +89,9 @@ def search(request):
         else:
             dateval = None
 
-        if request.GET.has_key('s'):
+        if 's' in request.GET:
             listsort = request.GET['s']
-            if not listsort in ('r', 'd', 'i'):
+            if listsort not in ('r', 'd', 'i'):
                 listsort = 'r'
         else:
             listsort = 'r'
@@ -103,9 +100,9 @@ def search(request):
             dateval = 365
 
         sortoptions = (
-            {'val': 'r', 'text': 'Rank', 'selected': not (request.GET.has_key('s') and request.GET['s'] == 'd')},
-            {'val': 'd', 'text': 'Date', 'selected': request.GET.has_key('s') and request.GET['s'] == 'd'},
-            {'val': 'i', 'text': 'Reverse date', 'selected': request.GET.has_key('s') and request.GET['s'] == 'i'},
+            {'val': 'r', 'text': 'Rank', 'selected': request.GET.get('s', '') not in ('d', 'i')},
+            {'val': 'd', 'text': 'Date', 'selected': request.GET.get('s', '') == 'd'},
+            {'val': 'i', 'text': 'Reverse date', 'selected': request.GET.get('s', '') == 'i'},
         )
         dateoptions = (
             {'val': -1, 'text': 'anytime'},
@@ -117,18 +114,11 @@ def search(request):
         )
     else:
         searchlists = False
-        if request.GET.has_key('u'):
-            suburl = request.GET['u']
-        else:
-            suburl = None
-
-        if request.GET.has_key('a'):
-            allsites = (request.GET['a'] == "1")
-        else:
-            allsites = False
+        suburl = request.GET.get('u', None)
+        allsites = request.GET.get('a', None) == "1"
 
     # Check that we actually have something to search for
-    if not request.GET.has_key('q') or request.GET['q'] == '':
+    if request.GET.get('q', '') == '':
         if searchlists:
             return render(request, 'search/listsearch.html', {
                 'search_error': "No search term specified.",
@@ -151,12 +141,9 @@ def search(request):
         })
 
     # Is the request being paged?
-    if request.GET.has_key('p'):
-        try:
-            pagenum = int(request.GET['p'])
-        except:
-            pagenum = 1
-    else:
+    try:
+        pagenum = int(request.GET.get('p', 1))
+    except:
         pagenum = 1
 
     firsthit = (pagenum - 1) * hitsperpage + 1
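
The paging hunk folds the has_key branch into a get() with a default before the int() conversion, so a missing parameter and a malformed one both end up as page 1. A standalone sketch of the pattern, using a hypothetical helper and a plain dict in place of Django's QueryDict:

    def parse_pagenum(params):
        # get() mirrors the request.GET.get() call; int() raises ValueError on junk.
        try:
            return int(params.get('p', 1))
        except ValueError:
            return 1

    parse_pagenum({})             # 1 (parameter absent, default used)
    parse_pagenum({'p': '3'})     # 3
    parse_pagenum({'p': 'abc'})   # 1 (fallback on bad input)
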
diff --git a/pgweb/security/models.py b/pgweb/security/models.py
index be317931b5a57a33da4a2c977f81898c20f1b6f0..cde84ba0c2e165a15824a3b5f0f42eb71197d599 100644
@@ -35,11 +35,11 @@ def other_vectors_validator(val):
     try:
         for vector in val.split('/'):
             k, v = vector.split(':')
-            if not cvss.constants3.METRICS_VALUES.has_key(k):
+            if k not in cvss.constants3.METRICS_VALUES:
                 raise ValidationError("Metric {0} is unknown".format(k))
             if k in ('AV', 'AC', 'PR', 'UI', 'S', 'C', 'I', 'A'):
                 raise ValidationError("Metric {0} must be specified in the dropdowns".format(k))
-            if not cvss.constants3.METRICS_VALUES[k].has_key(v):
+            if v not in cvss.constants3.METRICS_VALUES[k]:
                 raise ValidationError("Metric {0} has unknown value {1}. Valind ones are: {2}".format(
                     k, v,
                     ", ".join(cvss.constants3.METRICS_VALUES[k].keys()),
diff --git a/pgweb/util/admin.py b/pgweb/util/admin.py
index b3b10f5835ac32649c3e25d54f186fa54e354230..26ba726b99d09e1166e5b2aff3b61ac8258f6dc8 100644
@@ -28,7 +28,7 @@ class PgwebAdmin(admin.ModelAdmin):
     def change_view(self, request, object_id, form_url='', extra_context=None):
         if hasattr(self.model, 'send_notification') and self.model.send_notification:
             # Anything that sends notification supports manual notifications
-            if extra_context == None:
+            if extra_context is None:
                 extra_context = dict()
             extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date')
 
@@ -57,7 +57,7 @@ class PgwebAdmin(admin.ModelAdmin):
         if change and hasattr(self.model, 'send_notification') and self.model.send_notification:
             # We only do processing if something changed, not when adding
             # a new object.
-            if request.POST.has_key('new_notification') and request.POST['new_notification']:
+            if 'new_notification' in request.POST and request.POST['new_notification']:
                 # Need to send off a new notification. We'll also store
                 # it in the database for future reference, of course.
                 if not obj.org.email:
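
Besides has_key(), this commit also replaces equality comparisons against None with identity tests. "is None" is both the PEP 8 spelling and the more robust one, since == can be intercepted by a custom __eq__. A minimal sketch with a hypothetical class:

    class Weird(object):
        def __eq__(self, other):
            return True  # claims equality with everything, including None

    w = Weird()
    print(w == None)  # True, misleading
    print(w is None)  # False; identity cannot be faked
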
diff --git a/pgweb/util/contexts.py b/pgweb/util/contexts.py
index 853ac1d915fdc5e4a2c2898abc2302f34e1b0ee0..43f6c3e2a51ffb7ee86a76bcd5a37c13556a7d8b 100644
@@ -87,7 +87,7 @@ sitenav = {
 
 
 def get_nav_menu(section):
-    if sitenav.has_key(section):
+    if section in sitenav:
         return sitenav[section]
     else:
         return {}
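
This if/else could arguably shrink further to a single lookup with a default, matching the get() style used elsewhere in the commit. A hedged suggestion, not part of the change:

    def get_nav_menu(section):
        # Returns {} when the section has no menu defined.
        return sitenav.get(section, {})
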
diff --git a/pgweb/util/misc.py b/pgweb/util/misc.py
index 65ffb330f43f1aa4821cde4bc1151c77a9894992..353548b84cf4fde6036ba74e0822b6181c8b4a75 100644
@@ -27,7 +27,7 @@ def get_client_ip(request):
     or behind one of our SSL proxies, make sure to get the *actual* client IP,
     and not the IP of the cache/proxy.
     """
-    if request.META.has_key('HTTP_X_FORWARDED_FOR'):
+    if 'HTTP_X_FORWARDED_FOR' in request.META:
         # There is a x-forwarded-for header, so trust it but only if the actual connection
         # is coming in from one of our frontends.
         if request.META['REMOTE_ADDR'] in settings.FRONTEND_SERVERS:
diff --git a/pgweb/util/signals.py b/pgweb/util/signals.py
index 879913d52700dc0db5e07ab8146609b4d488f5bc..02ed8035b2f2c688a9f46d7f2a78614d07d3431f 100644
@@ -54,7 +54,7 @@ def _get_all_notification_fields(obj):
     else:
         # Include all field names that are local to this model
         # (not auto created), except the specified ones
-        return [f.name for f in obj._meta.get_fields() if not f.name in ('approved', 'submitter', 'id', ) and not f.auto_created]
+        return [f.name for f in obj._meta.get_fields() if f.name not in ('approved', 'submitter', 'id', ) and not f.auto_created]
 
 
 def _get_attr_value(obj, fieldname):
diff --git a/tools/communityauth/sample/django/auth.py b/tools/communityauth/sample/django/auth.py
index 452612c3cf05d8641e00506d8b444a4fc00d49d9..90e293c1f890e8b3a5cf316bcf78aeaf59442f68 100644
@@ -54,7 +54,7 @@ def login(request):
         from django.contrib.auth.views import login
         return login(request, template_name='admin.html')
 
-    if request.GET.has_key('next'):
+    if 'next' in request.GET:
         # Put together an url-encoded dict of parameters we're getting back,
         # including a small nonce at the beginning to make sure it doesn't
         # encrypt the same way every time.
@@ -85,13 +85,13 @@ def logout(request):
 # Receive an authentication response from the main website and try
 # to log the user in.
 def auth_receive(request):
-    if request.GET.has_key('s') and request.GET['s'] == "logout":
+    if 's' in request.GET and request.GET['s'] == "logout":
         # This was a logout request
         return HttpResponseRedirect('/')
 
-    if not request.GET.has_key('i'):
+    if 'i' not in request.GET:
         return HttpResponse("Missing IV in url!", status=400)
-    if not request.GET.has_key('d'):
+    if 'd' not in request.GET:
         return HttpResponse("Missing data in url!", status=400)
 
     # Set up an AES object and decrypt the data we received
@@ -173,7 +173,7 @@ We apologize for the inconvenience.
 
     # Finally, check if we have a data package that tells us where to
     # redirect the user.
-    if data.has_key('d'):
+    if 'd' in data:
         (ivs, datas) = data['d'][0].split('$')
         decryptor = AES.new(SHA.new(settings.SECRET_KEY).digest()[:16],
                             AES.MODE_CBC,
@@ -183,7 +183,7 @@ We apologize for the inconvenience.
             rdata = urlparse.parse_qs(s, strict_parsing=True)
         except ValueError:
             return HttpResponse("Invalid encrypted data received.", status=400)
-        if rdata.has_key('r'):
+        if 'r' in rdata:
             # Redirect address
             return HttpResponseRedirect(rdata['r'][0])
     # No redirect specified, see if we have it in our settings
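
The rdata['r'][0] indexing above works because urlparse.parse_qs maps every key to a list of values, even when a parameter appears only once. A minimal sketch in the Python 2 spelling this file uses, with hypothetical input:

    import urlparse

    rdata = urlparse.parse_qs('r=%2Faccount%2F&x=1', strict_parsing=True)
    if 'r' in rdata:
        print(rdata['r'][0])  # '/account/' (percent-decoding applied, first value taken)
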
diff --git a/tools/ftp/spider_yum.py b/tools/ftp/spider_yum.py
index 7b3a5869978032948264426b7064acd151ae93c1..36e248a4d215a1184e1bad25e2f79068b414a507 100755
@@ -78,7 +78,7 @@ if __name__ == "__main__":
                     found = False
                     for p, pinfo in platforms.items():
                         if pinfo['p'] == familypath and pinfo['f'] == shortdist:
-                            if not reporpms[v].has_key(p):
+                            if p not in reporpms[v]:
                                 reporpms[v][p] = {}
                             reporpms[v][p][arch] = max(ver, reporpms[v][p].get(arch, 0))
                             platforms[p]['found'] = True
diff --git a/tools/purgehook/purgehook.py b/tools/purgehook/purgehook.py
index 4584635dd00cf32940c27778c59aa24933677204..291fdac0f1bdbdf27a608d6ca778bbb6bfa9e082 100755
@@ -26,7 +26,7 @@ if __name__ == "__main__":
     for l in sys.stdin:
         if l.startswith('templates/'):
             tmpl = l[len('templates/'):].strip()
-            if not tmpl in BANNED_TEMPLATES:
+            if tmpl not in BANNED_TEMPLATES:
                 curs.execute("SELECT varnish_purge_xkey(%(key)s)", {
                     'key': 'pgwt_{0}'.format(hashlib.md5(tmpl).hexdigest()),
                 })
diff --git a/tools/search/crawler/lib/archives.py b/tools/search/crawler/lib/archives.py
index 7dbed9a583199ad38169c8931510f27b766ddf4d..ab1a5f249187a170c22a0349f66b190cab5688ec 100644
@@ -62,7 +62,7 @@ class MultiListCrawler(object):
                         'month': d.month,
                     })
                     x = curs.fetchall()
-                    if x[0][0] != None:
+                    if x[0][0] is not None:
                         maxmsg = x[0][0]
                     else:
                         maxmsg = -1
diff --git a/tools/search/crawler/lib/basecrawler.py b/tools/search/crawler/lib/basecrawler.py
index 173cf0c89aa685de8706c19819bebac80852866d..7bd3c4d3d199e100f09931b39c342a666141743a 100644
@@ -93,7 +93,7 @@ class BaseSiteCrawler(object):
         return False
 
     def crawl_page(self, url, relprio, internal):
-        if self.pages_crawled.has_key(url) or self.pages_crawled.has_key(url + "/"):
+        if url in self.pages_crawled or url + "/" in self.pages_crawled:
             return
 
         if self.exclude_url(url):
@@ -103,7 +103,7 @@ class BaseSiteCrawler(object):
         (result, pagedata, lastmod) = self.fetch_page(url)
 
         if result == 0:
-            if pagedata == None:
+            if pagedata is None:
                 # Result ok but no data, means that the page was not modified.
                 # Thus we can happily consider ourselves done here.
                 return
@@ -184,7 +184,7 @@ class BaseSiteCrawler(object):
                 h.putrequest("GET", url)
             h.putheader("User-agent", "pgsearch/0.2")
             h.putheader("Connection", "close")
-            if self.scantimes.has_key(url):
+            if url in self.scantimes:
                 h.putheader("If-Modified-Since", formatdate(time.mktime(self.scantimes[url].timetuple())))
             h.endheaders()
             resp = h.getresponse()
@@ -224,7 +224,7 @@ class BaseSiteCrawler(object):
         return datetime.datetime.now()
 
     def parse_html(self, page):
-        if page == None:
+        if page is None:
             return None
 
         p = GenericHtmlParser()
diff --git a/tools/search/crawler/lib/genericsite.py b/tools/search/crawler/lib/genericsite.py
index aa8b9e09f7cd4c935f2a7bf77edcedbe8493443d..da0d7c0fbdd0f90d78fdd20510f5f8910693c822 100644
@@ -46,7 +46,7 @@ class GenericSiteCrawler(BaseSiteCrawler):
 
     def post_process_page(self, url):
         for l in self.resolve_links(self.page.links, url):
-            if self.pages_crawled.has_key(l) or self.pages_crawled.has_key(l + "/"):
+            if l in self.pages_crawled or l + "/" in self.pages_crawled:
                 continue
             if self.exclude_url(l):
                 continue
diff --git a/tools/search/crawler/lib/sitemapsite.py b/tools/search/crawler/lib/sitemapsite.py
index fddd3ffecf4c9ef34b46b6147d92b5231b84482b..4e98cfd18446999649cf7045823296ccb79d5b32 100644
@@ -85,7 +85,7 @@ class SitemapSiteCrawler(BaseSiteCrawler):
             # Advance 8 characters - length of https://.
             url = url[len(self.hostname) + 8:]
             if lastmod:
-                if self.scantimes.has_key(url):
+                if url in self.scantimes:
                     if lastmod < self.scantimes[url]:
                         # Not modified since last scan, so don't reload
                         # Stick it in the list of pages we've scanned though,