STRLCPY/maigret: commit 908176be
• maigret/activation.py
    1  -import aiohttp
    2  -from aiohttp import CookieJar
    3  -import asyncio
    4  -import json
    5 1  from http.cookiejar import MozillaCookieJar
    6 2  from http.cookies import Morsel
    7 3   
    8 4  import requests
     5 +from aiohttp import CookieJar
     6 + 
    9 7   
    10 8  class ParsingActivator:
    11 9   @staticmethod
    skipped 63 lines
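
The hunk above is pure import hygiene: aiohttp, asyncio, and json were imported but unused, and the one aiohttp name that is actually used moves into the third-party group. The resulting layout follows the conventional grouping (standard library first, a blank line, then third-party packages), which matches what a formatter such as isort emits by default:

# standard library
from http.cookiejar import MozillaCookieJar
from http.cookies import Morsel

# third-party
import requests
from aiohttp import CookieJar
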
• maigret/checking.py
    skipped 466 lines
    467 467   if no_progressbar:
    468 468   await asyncio.gather(*tasks)
    469 469   else:
    470  - for f in tqdm.asyncio.tqdm.as_completed(tasks):
    471  - await f
     470 + for f in tqdm.asyncio.tqdm.as_completed(tasks, timeout=timeout):
     471 + try:
     472 + await f
     473 + except asyncio.exceptions.TimeoutError:
     474 + # TODO: write timeout to results
     475 + pass
    472 476   
    473 477   await session.close()
    474 478   
    skipped 137 lines
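
The change above bounds the whole scan: tqdm.asyncio.tqdm.as_completed() forwards its timeout= argument to asyncio.as_completed(), and once the deadline expires every not-yet-finished future raises asyncio.TimeoutError when awaited (asyncio.exceptions.TimeoutError in the hunk is the same class), which is why the await has to move inside a try/except. A minimal standalone sketch of the pattern; the slow() coroutine and the one-second deadline are invented for illustration:

import asyncio

import tqdm.asyncio


async def slow(seconds):
    await asyncio.sleep(seconds)
    return seconds


async def main():
    tasks = [slow(s) for s in (0.1, 0.2, 5)]
    # 0.1 and 0.2 finish in time; the 5-second task hits the deadline
    for f in tqdm.asyncio.tqdm.as_completed(tasks, timeout=1):
        try:
            print(await f)
        except asyncio.TimeoutError:
            pass  # the commit leaves a TODO here: record the timeout in results


asyncio.run(main())
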
• maigret/maigret.py
    skipped 3 lines
    4 4   
    5 5  import os
    6 6  import platform
    7  -import sys
    8 7  from argparse import ArgumentParser, RawDescriptionHelpFormatter
    9 8   
    10 9  import requests
    skipped 165 lines
    176 175   action="store", metavar='REPORT_TYPE',
    177 176   dest="json", default='', type=check_supported_json_format,
    178 177   help=f"Generate a JSON report of specific type: {', '.join(SUPPORTED_JSON_REPORT_FORMATS)}"
    179  - " (one report per username)."
     178 + " (one report per username)."
    180 179   )
    181 180   
    182 181   args = parser.parse_args()
    skipped 21 lines
    204 203   u: args.id_type
    205 204   for u in args.username
    206 205   if u not in ['-']
    207  - and u not in args.ignore_ids_list
     206 + and u not in args.ignore_ids_list
    208 207   }
    209 208   
    210 209   parsing_enabled = not args.disable_extracting
    skipped 168 lines
    379 378   filename = report_filepath_tpl.format(username=username, postfix=f'_{args.json}.json')
    380 379   save_json_report(filename, username, results, report_type=args.json)
    381 380   query_notify.warning(f'JSON {args.json} report for {username} saved in {filename}')
    382  - 
    383 381   
    384 382   # reporting for all the result
    385 383   if general_results:
    skipped 31 lines
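
Besides the import and whitespace cleanup, the re-indented condition above belongs to the dict comprehension that maps each queried name to its identifier type while skipping the stdin placeholder '-' and anything in args.ignore_ids_list. A tiny worked example with made-up values:

usernames = ['alice', '-', 'bob']
ignore_ids_list = ['bob']
id_type = 'username'

result = {
    u: id_type
    for u in usernames
    if u not in ['-']
    and u not in ignore_ids_list
}
assert result == {'alice': 'username'}
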
• maigret/notify.py
    skipped 3 lines
    4 4  results of queries.
    5 5  """
    6 6  import sys
     7 + 
    7 8  from colorama import Fore, Style, init
    8 9   
    9 10  from .result import QueryStatus
    skipped 283 lines
• maigret/report.py
    1 1  import csv
    2  -import json
    3 2  import io
     3 +import json
    4 4  import logging
    5 5  import os
     6 +from argparse import ArgumentTypeError
     7 +from datetime import datetime
     8 + 
    6 9  import pycountry
    7 10  import xmind
    8  -from datetime import datetime
     11 +from dateutil.parser import parse as parse_datetime_str
    9 12  from jinja2 import Template
    10 13  from xhtml2pdf import pisa
    11  -from argparse import ArgumentTypeError
    12  -from dateutil.parser import parse as parse_datetime_str
    13 14   
    14 15  from .result import QueryStatus
    15 16  from .utils import is_country_tag, CaseConverter, enrich_link_str
    skipped 2 lines
    18 19   'simple',
    19 20   'ndjson',
    20 21  ]
    21  - 
    22 22   
    23 23  '''
    24 24  UTILS
    25 25  '''
     26 + 
     27 + 
    26 28  def filter_supposed_data(data):
    27 29   ### interesting fields
    28 30   allowed_fields = ['fullname', 'gender', 'location', 'age']
    skipped 6 lines
    35 37  '''
    36 38  REPORTS SAVING
    37 39  '''
     40 + 
     41 + 
    38 42  def save_csv_report(filename: str, username: str, results: dict):
    39 43   with open(filename, 'w', newline='', encoding='utf-8') as f:
    40 44   generate_csv_report(username, results, f)
    skipped 16 lines
    57 61   filled_template = template.render(**context)
    58 62   with open(filename, 'w+b') as f:
    59 63   pisa.pisaDocument(io.StringIO(filled_template), dest=f, default_css=css)
     64 + 
    60 65   
    61 66  def save_json_report(filename: str, username: str, results: dict, report_type: str):
    62 67   with open(filename, 'w', encoding='utf-8') as f:
    skipped 3 lines
    66 71  '''
    67 72  REPORTS GENERATING
    68 73  '''
     74 + 
     75 + 
    69 76  def generate_report_template(is_pdf: bool):
    70 77   """
    71 78   HTML/PDF template generation
    72 79   """
     80 + 
    73 81   def get_resource_content(filename):
    74 82   return open(os.path.join(maigret_path, 'resources', filename)).read()
    75 83   
    skipped 36 lines
    112 120   continue
    113 121   
    114 122   status = dictionary.get('status')
     123 + if not status: # FIXME: currently in case of timeout
     124 + continue
     125 + 
    115 126   if status.ids_data:
    116 127   dictionary['ids_data'] = status.ids_data
    117 128   extended_info_count += 1
    skipped 48 lines
    166 177   for t in status.tags:
    167 178   tags[t] = tags.get(t, 0) + 1
    168 179   
    169  - 
    170 180   brief_text.append(f'Search by {id_type} {username} returned {found_accounts} accounts.')
    171 181   
    172 182   if new_ids:
    skipped 4 lines
    177 187   
    178 188   brief_text.append(f'Extended info extracted from {extended_info_count} accounts.')
    179 189   
    180  - 
    181  - 
    182 190   brief = ' '.join(brief_text).strip()
    183 191   tuple_sort = lambda d: sorted(d, key=lambda x: x[1], reverse=True)
    184 192   
    skipped 36 lines
    221 229   results[site]['url_user'],
    222 230   str(results[site]['status'].status),
    223 231   results[site]['http_status'],
    224  - ])
     232 + ])
    225 233   
    226 234   
    227 235  def generate_txt_report(username: str, results: dict, file):
    skipped 25 lines
    253 261   
    254 262   if is_report_per_line:
    255 263   data['sitename'] = sitename
    256  - file.write(json.dumps(data)+'\n')
     264 + file.write(json.dumps(data) + '\n')
    257 265   else:
    258 266   all_json[sitename] = data
    259 267   
    260 268   if not is_report_per_line:
    261 269   file.write(json.dumps(all_json))
    262 270   
     271 + 
    263 272  '''
    264 273  XMIND 8 Functions
    265 274  '''
     275 + 
     276 + 
    266 277  def save_xmind_report(filename, username, results):
    267 278   if os.path.exists(filename):
    268 279   os.remove(filename)
    skipped 8 lines
    277 288   alltags = {}
    278 289   supposed_data = {}
    279 290   
    280  - sheet.setTitle("%s Analysis"%(username))
     291 + sheet.setTitle("%s Analysis" % (username))
    281 292   root_topic1 = sheet.getRootTopic()
    282  - root_topic1.setTitle("%s"%(username))
     293 + root_topic1.setTitle("%s" % (username))
    283 294   
    284 295   undefinedsection = root_topic1.addSubTopic()
    285 296   undefinedsection.setTitle("Undefined")
    skipped 47 lines
    333 344   currentsublabel.setTitle("%s: %s" % (k, currentval))
    334 345   ### Add Supposed DATA
    335 346   filterede_supposed_data = filter_supposed_data(supposed_data)
    336  - if(len(filterede_supposed_data) >0):
     347 + if (len(filterede_supposed_data) > 0):
    337 348   undefinedsection = root_topic1.addSubTopic()
    338 349   undefinedsection.setTitle("SUPPOSED DATA")
    339 350   for k, v in filterede_supposed_data.items():
    skipped 4 lines
    344 355  def check_supported_json_format(value):
    345 356   if value and not value in SUPPORTED_JSON_REPORT_FORMATS:
    346 357   raise ArgumentTypeError(f'JSON report type must be one of the following types: '
    347  - + ', '.join(SUPPORTED_JSON_REPORT_FORMATS))
     358 + + ', '.join(SUPPORTED_JSON_REPORT_FORMATS))
    348 359   return value
    349 360   
    350  - 
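
Most of this file is blank-line normalization (two blank lines between top-level definitions) plus two substantive bits: a guard that skips result entries with no status (the FIXME notes these currently appear on timeouts), and check_supported_json_format, which doubles as an argparse type= validator: raising ArgumentTypeError from a type callable makes argparse exit with a usage error instead of a traceback. A self-contained sketch of that wiring, mirroring how maigret.py passes it to add_argument:

from argparse import ArgumentParser, ArgumentTypeError

SUPPORTED_JSON_REPORT_FORMATS = ['simple', 'ndjson']

def check_supported_json_format(value):
    if value and value not in SUPPORTED_JSON_REPORT_FORMATS:
        raise ArgumentTypeError('JSON report type must be one of the following types: '
                                + ', '.join(SUPPORTED_JSON_REPORT_FORMATS))
    return value

parser = ArgumentParser()
parser.add_argument('--json', type=check_supported_json_format, default='')
print(parser.parse_args(['--json', 'ndjson']).json)  # -> ndjson
parser.parse_args(['--json', 'xml'])  # -> exits with a usage error naming the valid types
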
• maigret/resources/data.json
    skipped 12348 lines
    12349 12349   "us"
    12350 12350   ],
    12351 12351   "headers": {
    12352  - "authorization": "Bearer BQCEWXdzCPImYp4zhhbEssMRKqvUasJb9vVoe2A3J5eFMhTfn0b5jPkUHGJ9Fe0_HCaF81AMeRnSD9KzIPg"
     12352 + "authorization": "Bearer BQA6sdhtUg3hadjln7DCoAK6sLn7KrHfsn2DObW2gr-W3HgF0h1KZGVYgwispRDR1tqRntVeTd0Duvb2q4g"
    12353 12353   },
    12354 12354   "errors": {
    12355 12355   "Spotify is currently not available in your country.": "Access denied in your country, use proxy/vpn"
    skipped 1706 lines
    14062 14062   "video"
    14063 14063   ],
    14064 14064   "headers": {
    14065  - "Authorization": "jwt eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MTYwOTgwODAsInVzZXJfaWQiOm51bGwsImFwcF9pZCI6NTg0NzksInNjb3BlcyI6InB1YmxpYyIsInRlYW1fdXNlcl9pZCI6bnVsbH0.tTecsUjIJ0KCcMxOT8OgkCp-P3ezg5RR0FGqtiejqE8"
     14065 + "Authorization": "jwt eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MTYxMDcyNjAsInVzZXJfaWQiOm51bGwsImFwcF9pZCI6NTg0NzksInNjb3BlcyI6InB1YmxpYyIsInRlYW1fdXNlcl9pZCI6bnVsbH0.kzWxBf1qCJwjpZYUP6w-Pf4VptBMKpKUaMw8VnYwtPU"
    14066 14066   },
    14067 14067   "activation": {
    14068 14068   "url": "https://vimeo.com/_rv/viewer",
    skipped 900 lines
    14969 14969   "usernameUnclaimed": "noonewouldeverusethis7"
    14970 14970   },
    14971 14971   "YandexLocal": {
     14972 + "disabled": true,
    14972 14973   "tags": [
    14973 14974   "ru"
    14974 14975   ],
    skipped 8620 lines
    23595 23596   "urlMain": "https://calendly.com",
    23596 23597   "usernameClaimed": "john",
    23597 23598   "usernameUnclaimed": "noonewouldeverusethis7"
     23599 + },
     23600 + "depop.com": {
     23601 + "checkType": "message",
     23602 + "presenseStrs": [
     23603 + "first_name"
     23604 + ],
     23605 + "absenceStrs": [
     23606 + "invalidUrlError__message"
     23607 + ],
     23608 + "url": "https://www.depop.com/{username}",
     23609 + "urlMain": "https://www.depop.com",
     23610 + "usernameClaimed": "blue",
     23611 + "usernameUnclaimed": "noonewouldeverusethis7"
     23612 + },
     23613 + "community.brave.com": {
     23614 + "engine": "Discourse",
     23615 + "urlMain": "https://community.brave.com",
     23616 + "usernameClaimed": "alex",
     23617 + "usernameUnclaimed": "noonewouldeverusethis7"
     23618 + },
     23619 + "community.endlessos.com": {
     23620 + "engine": "Discourse",
     23621 + "urlMain": "https://community.endlessos.com",
     23622 + "usernameClaimed": "alex",
     23623 + "usernameUnclaimed": "noonewouldeverusethis7"
     23624 + },
     23625 + "forum.endeavouros.com": {
     23626 + "engine": "Discourse",
     23627 + "urlMain": "https://forum.endeavouros.com",
     23628 + "usernameClaimed": "alex",
     23629 + "usernameUnclaimed": "noonewouldeverusethis7"
     23630 + },
     23631 + "forum.garudalinux.org": {
     23632 + "engine": "Discourse",
     23633 + "urlMain": "https://forum.garudalinux.org",
     23634 + "usernameClaimed": "alex",
     23635 + "usernameUnclaimed": "noonewouldeverusethis7"
     23636 + },
     23637 + "forum.snapcraft.io": {
     23638 + "engine": "Discourse",
     23639 + "urlMain": "https://forum.snapcraft.io",
     23640 + "usernameClaimed": "alex",
     23641 + "usernameUnclaimed": "noonewouldeverusethis7"
     23642 + },
     23643 + "forum.zorin.com": {
     23644 + "engine": "Discourse",
     23645 + "urlMain": "https://forum.zorin.com",
     23646 + "usernameClaimed": "alex",
     23647 + "usernameUnclaimed": "noonewouldeverusethis7"
     23648 + },
     23649 + "codeseller.ru": {
     23650 + "engine": "Wordpress/Author",
     23651 + "urlMain": "https://codeseller.ru",
     23652 + "usernameClaimed": "alex",
     23653 + "usernameUnclaimed": "noonewouldeverusethis7"
     23654 + },
     23655 + "linuxpip.org": {
     23656 + "engine": "Wordpress/Author",
     23657 + "urlMain": "https://linuxpip.org",
     23658 + "usernameClaimed": "diehard",
     23659 + "usernameUnclaimed": "noonewouldeverusethis7"
    23598 23660   }
    23599 23661   },
    23600 23662   "engines": {
    skipped 86 lines
    23687 23749   },
    23688 23750   "presenseStrs": [
    23689 23751   "<meta name=\"generator\" content=\"Discourse"
     23752 + ]
     23753 + },
     23754 + "Wordpress/Author": {
     23755 + "name": "Wordpress/Author",
     23756 + "site": {
     23757 + "presenseStrs": [
     23758 + "author-",
     23759 + "author/"
     23760 + ],
     23761 + "absenceStrs": [
     23762 + "error404"
     23763 + ],
     23764 + "checkType": "message",
     23765 + "url": "{urlMain}/author/{username}/"
     23766 + },
     23767 + "presenseStrs": [
     23768 + "/wp-admin",
     23769 + "/wp-includes/wlwmanifest.xml"
    23690 23770   ]
    23691 23771   },
    23692 23772   "engine404": {
    skipped 29 lines
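
The data.json changes rotate two API tokens (Spotify, Vimeo), disable YandexLocal, and add nine sites: one plain message-type check (depop.com), six Discourse forums, and two sites on a new Wordpress/Author engine. An engine entry's site block supplies the shared check parameters (check type, URL template, presence/absence markers), so each concrete site only needs a urlMain and test usernames. A rough sketch of the expansion; the dict merge is deliberately simplified, the real logic lives in maigret/sites.py:

engine_site = {  # from the new Wordpress/Author engine
    "checkType": "message",
    "url": "{urlMain}/author/{username}/",
    "presenseStrs": ["author-", "author/"],
    "absenceStrs": ["error404"],
}
site = {"urlMain": "https://codeseller.ru"}  # from the codeseller.ru entry
site.update(engine_site)  # engine defaults fill in the site's check parameters

url = site["url"].replace("{urlMain}", site["urlMain"])
print(url.format(username="alex"))  # https://codeseller.ru/author/alex/
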
• maigret/sites.py
    skipped 1 lines
    2 2  """Maigret Sites Information"""
    3 3  import copy
    4 4  import json
    5  -import re
    6 5  import sys
    7 6   
    8 7  import requests
    skipped 78 lines
    87 86   url = self.url
    88 87   for group in ['urlMain', 'urlSubpath']:
    89 88   if group in url:
    90  - url = url.replace('{'+group+'}', self.__dict__[CaseConverter.camel_to_snake(group)])
     89 + url = url.replace('{' + group + '}', self.__dict__[CaseConverter.camel_to_snake(group)])
    91 90   
    92 91   self.url_regexp = URLMatcher.make_profile_url_regexp(url, self.regex_check)
    93 92   
    94 93   def detect_username(self, url: str) -> str:
    95 94   if self.url_regexp:
    96  - import logging
    97 95   match_groups = self.url_regexp.match(url)
    98 96   if match_groups:
    99 97   return match_groups.groups()[-1].rstrip('/')
    skipped 138 lines
    238 236   
    239 237   return self
    240 238   
    241  - 
    242 239   def load_from_json(self, json_data: dict) -> MaigretDatabase:
    243 240   # Add all of site information from the json file to internal site list.
    244 241   site_data = json_data.get("sites", {})
    skipped 17 lines
    262 259   )
    263 260   
    264 261   return self
    265  - 
    266 262   
    267 263   def load_from_str(self, db_str: str) -> MaigretDatabase:
    268 264   try:
    skipped 5 lines
    274 270   
    275 271   return self.load_from_json(data)
    276 272   
    277  - 
    278 273   def load_from_url(self, url: str) -> MaigretDatabase:
    279 274   is_url_valid = url.startswith('http://') or url.startswith('https://')
    280 275   
    skipped 21 lines
    302 297   )
    303 298   
    304 299   return self.load_from_json(data)
    305  - 
    306 300   
    307 301   def load_from_file(self, filename: str) -> MaigretDatabase:
    308 302   try:
    skipped 55 lines
    364 358   continue
    365 359   tags[tag] = tags.get(tag, 0) + 1
    366 360   
    367  - output += f'Enabled/total sites: {total_count-disabled_count}/{total_count}\n'
     361 + output += f'Enabled/total sites: {total_count - disabled_count}/{total_count}\n'
    368 362   output += 'Top sites\' profile URLs:\n'
    369 363   for url, count in sorted(urls.items(), key=lambda x: x[1], reverse=True)[:20]:
    370 364   if count == 1:
    skipped 7 lines
    378 372   output += f'{count}\t{tag}{mark}\n'
    379 373   
    380 374   return output
     375 + 
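
In sites.py the unused re and logging imports disappear and spacing is normalized; the interesting pair is the URL template substitution ({urlMain}/{urlSubpath} expansion) feeding URLMatcher.make_profile_url_regexp, whose output detect_username then matches, returning the last group with the trailing slash stripped. A standalone sketch of that flow, reusing the Flickr pattern that tests/test_utils.py asserts below; the regexp is hand-inlined here rather than built through URLMatcher:

import re

# pattern produced by URLMatcher.make_profile_url_regexp
# for the template 'flickr.com/photos/{username}'
url_regexp = re.compile(r'^https?://(www.)?flickr\.com/photos/(.+?)$')

match = url_regexp.match('https://www.flickr.com/photos/alexaimephotography/')
if match:
    print(match.groups()[-1].rstrip('/'))  # alexaimephotography
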
• maigret/submit.py
    1 1  import difflib
    2  -import json
    3 2   
    4 3  import requests
    5 4  from mock import Mock
    skipped 83 lines
    89 88   domain_raw = URL_RE.sub('', url_exists).strip().strip('/')
    90 89   domain_raw = domain_raw.split('/')[0]
    91 90   
    92  - matched_sites = list(filter(lambda x: domain_raw in x.url_main+x.url, db.sites))
     91 + matched_sites = list(filter(lambda x: domain_raw in x.url_main + x.url, db.sites))
    93 92   if matched_sites:
    94 93   print(f'Sites with domain "{domain_raw}" already exists in the Maigret database!')
    95 94   status = lambda s: '(disabled)' if s.disabled else ''
    skipped 84 lines
• maigret/utils.py
    1 1  import re
    2  -import sys
    3 2   
    4 3   
    5 4  class CaseConverter:
    skipped 50 lines
    56 55   regexp_str = self._HTTP_URL_RE_STR.replace('(.+)', url_regexp)
    57 56   
    58 57   return re.compile(regexp_str)
     58 + 
• tests/conftest.py
    1 1  import glob
    2 2  import logging
    3 3  import os
     4 + 
    4 5  import pytest
    5 6  from _pytest.mark import Mark
    6  -from mock import Mock
    7 7   
    8  -from maigret.sites import MaigretDatabase, MaigretSite
     8 +from maigret.sites import MaigretDatabase
    9 9   
    10 10  CUR_PATH = os.path.dirname(os.path.realpath(__file__))
    11 11  JSON_FILE = os.path.join(CUR_PATH, '../maigret/resources/data.json')
    skipped 34 lines
• tests/test_activation.py
    1 1  """Maigret activation test functions"""
    2 2  import json
     3 + 
    3 4  import aiohttp
    4 5  import pytest
    5 6  from mock import Mock
    skipped 49 lines
• tests/test_maigret.py
    1 1  """Maigret main module test functions"""
    2 2  import asyncio
     3 + 
    3 4  import pytest
    4 5  from mock import Mock
    5 6   
    6 7  from maigret.maigret import self_check
    7  -from maigret.sites import MaigretDatabase, MaigretSite
     8 +from maigret.sites import MaigretDatabase
    8 9   
    9 10  EXAMPLE_DB = {
    10 11   'engines': {
    skipped 96 lines
• tests/test_sites.py
    1 1  """Maigret Database test functions"""
    2 2  from maigret.sites import MaigretDatabase, MaigretSite
    3 3   
    4  - 
    5 4  EXAMPLE_DB = {
    6 5   'engines': {
    7 6   "XenForo": {
    8  - "presenseStrs": ["XenForo"],
    9  - "site": {
    10  - "absenceStrs": [
    11  - "The specified member cannot be found. Please enter a member's entire name.",
    12  - ],
    13  - "checkType": "message",
    14  - "errors": {
    15  - "You must be logged-in to do that.": "Login required"
    16  - },
    17  - "url": "{urlMain}{urlSubpath}/members/?username={username}"
    18  - }
     7 + "presenseStrs": ["XenForo"],
     8 + "site": {
     9 + "absenceStrs": [
     10 + "The specified member cannot be found. Please enter a member's entire name.",
     11 + ],
     12 + "checkType": "message",
     13 + "errors": {
     14 + "You must be logged-in to do that.": "Login required"
     15 + },
     16 + "url": "{urlMain}{urlSubpath}/members/?username={username}"
     17 + }
    19 18   },
    20 19   },
    21 20   'sites': {
    22 21   "Amperka": {
    23  - "engine": "XenForo",
    24  - "rank": 121613,
    25  - "tags": [
    26  - "ru"
    27  - ],
    28  - "urlMain": "http://forum.amperka.ru",
    29  - "usernameClaimed": "adam",
    30  - "usernameUnclaimed": "noonewouldeverusethis7"
     22 + "engine": "XenForo",
     23 + "rank": 121613,
     24 + "tags": [
     25 + "ru"
     26 + ],
     27 + "urlMain": "http://forum.amperka.ru",
     28 + "usernameClaimed": "adam",
     29 + "usernameUnclaimed": "noonewouldeverusethis7"
    31 30   },
    32 31   }
    33 32  }
    skipped 132 lines
    166 165   
    167 166   assert len(db.ranked_sites_dict()) == 2
    168 167   assert len(db.ranked_sites_dict(disabled=False)) == 1
     168 + 
    169 169   
    170 170  def test_ranked_sites_dict_id_type():
    171 171   db = MaigretDatabase()
    skipped 8 lines
• tests/test_utils.py
    1 1  """Maigret utils test functions"""
    2 2  import itertools
    3 3  import re
     4 + 
    4 5  from maigret.utils import CaseConverter, is_country_tag, enrich_link_str, URLMatcher
    5 6   
    6 7   
    7 8  def test_case_convert_camel_to_snake():
    8  - a = 'SnakeCasedString'
    9  - b = CaseConverter.camel_to_snake(a)
     9 + a = 'SnakeCasedString'
     10 + b = CaseConverter.camel_to_snake(a)
     11 + 
     12 + assert b == 'snake_cased_string'
    10 13   
    11  - assert b == 'snake_cased_string'
    12 14   
    13 15  def test_case_convert_snake_to_camel():
    14  - a = 'camel_cased_string'
    15  - b = CaseConverter.snake_to_camel(a)
     16 + a = 'camel_cased_string'
     17 + b = CaseConverter.snake_to_camel(a)
     18 + 
     19 + assert b == 'camelCasedString'
    16 20   
    17  - assert b == 'camelCasedString'
    18 21   
    19 22  def test_case_convert_snake_to_title():
    20  - a = 'camel_cased_string'
    21  - b = CaseConverter.snake_to_title(a)
     23 + a = 'camel_cased_string'
     24 + b = CaseConverter.snake_to_title(a)
    22 25   
    23  - assert b == 'Camel cased string'
     26 + assert b == 'Camel cased string'
     27 + 
    24 28   
    25 29  def test_is_country_tag():
    26  - assert is_country_tag('ru') == True
    27  - assert is_country_tag('FR') == True
     30 + assert is_country_tag('ru') == True
     31 + assert is_country_tag('FR') == True
     32 + 
     33 + assert is_country_tag('a1') == False
     34 + assert is_country_tag('dating') == False
    28 35   
    29  - assert is_country_tag('a1') == False
    30  - assert is_country_tag('dating') == False
     36 + assert is_country_tag('global') == True
    31 37   
    32  - assert is_country_tag('global') == True
    33 38   
    34 39  def test_enrich_link_str():
    35  - assert enrich_link_str('test') == 'test'
    36  - assert enrich_link_str(' www.flickr.com/photos/alexaimephotography/') == '<a class="auto-link" href="www.flickr.com/photos/alexaimephotography/">www.flickr.com/photos/alexaimephotography/</a>'
     40 + assert enrich_link_str('test') == 'test'
     41 + assert enrich_link_str(
     42 + ' www.flickr.com/photos/alexaimephotography/') == '<a class="auto-link" href="www.flickr.com/photos/alexaimephotography/">www.flickr.com/photos/alexaimephotography/</a>'
     43 + 
    37 44   
    38 45  def test_url_extract_main_part():
    39  - url_main_part = 'flickr.com/photos/alexaimephotography'
     46 + url_main_part = 'flickr.com/photos/alexaimephotography'
    40 47   
    41  - parts = [
    42  - ['http://', 'https://'],
    43  - ['www.', ''],
    44  - [url_main_part],
    45  - ['/', ''],
    46  - ]
     48 + parts = [
     49 + ['http://', 'https://'],
     50 + ['www.', ''],
     51 + [url_main_part],
     52 + ['/', ''],
     53 + ]
    47 54   
    48  - url_regexp = re.compile('^https?://(www.)?flickr.com/photos/(.+?)$')
    49  - for url_parts in itertools.product(*parts):
    50  - url = ''.join(url_parts)
    51  - assert URLMatcher.extract_main_part(url) == url_main_part
    52  - assert not url_regexp.match(url) is None
     55 + url_regexp = re.compile('^https?://(www.)?flickr.com/photos/(.+?)$')
     56 + for url_parts in itertools.product(*parts):
     57 + url = ''.join(url_parts)
     58 + assert URLMatcher.extract_main_part(url) == url_main_part
     59 + assert not url_regexp.match(url) is None
     60 + 
    53 61   
    54 62  def test_url_make_profile_url_regexp():
    55  - url_main_part = 'flickr.com/photos/{username}'
     63 + url_main_part = 'flickr.com/photos/{username}'
    56 64   
    57  - parts = [
    58  - ['http://', 'https://'],
    59  - ['www.', ''],
    60  - [url_main_part],
    61  - ['/', ''],
    62  - ]
     65 + parts = [
     66 + ['http://', 'https://'],
     67 + ['www.', ''],
     68 + [url_main_part],
     69 + ['/', ''],
     70 + ]
    63 71   
    64  - for url_parts in itertools.product(*parts):
    65  - url = ''.join(url_parts)
    66  - assert URLMatcher.make_profile_url_regexp(url).pattern == r'^https?://(www.)?flickr\.com/photos/(.+?)$'
     72 + for url_parts in itertools.product(*parts):
     73 + url = ''.join(url_parts)
     74 + assert URLMatcher.make_profile_url_regexp(url).pattern == r'^https?://(www.)?flickr\.com/photos/(.+?)$'
    67 75   