Projects STRLCPY maigret Commits 5b405c6a
🤬
Revision indexing in progress... (symbol navigation in revisions will be accurate after indexing is complete)
  • ■ ■ ■ ■
    .github/workflows/python-package.yml
    skipped 25 lines
    26 26   - name: Install dependencies
    27 27   run: |
    28 28   python -m pip install --upgrade pip
    29  - python -m pip install flake8 pytest pytest-rerunfailures
     29 + python -m pip install -r test-requirements.txt
    30 30   if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
    31 31   - name: Test with pytest
    32 32   run: |
    skipped 2 lines
  • ■ ■ ■ ■ ■ ■
    maigret/checking.py
    skipped 25 lines
    26 26  from .result import QueryResult, QueryStatus
    27 27  from .sites import MaigretDatabase, MaigretSite
    28 28  from .types import QueryOptions, QueryResultWrapper
    29  -from .utils import get_random_user_agent
     29 +from .utils import get_random_user_agent, ascii_data_display
    30 30   
    31 31   
    32 32  SUPPORTED_IDS = (
    skipped 200 lines
    233 233   result = build_result(QueryStatus.CLAIMED)
    234 234   else:
    235 235   result = build_result(QueryStatus.AVAILABLE)
    236  - elif check_type == "status_code":
     236 + elif check_type in "status_code":
    237 237   # Checks if the status code of the response is 2XX
    238  - if is_presense_detected and (not status_code >= 300 or status_code < 200):
     238 + if 200 <= status_code < 300:
    239 239   result = build_result(QueryStatus.CLAIMED)
    240 240   else:
    241 241   result = build_result(QueryStatus.AVAILABLE)
    skipped 30 lines
    272 272   new_usernames[v] = k
    273 273   
    274 274   results_info["ids_usernames"] = new_usernames
    275  - links = eval(extracted_ids_data.get("links", "[]"))
     275 + links = ascii_data_display(extracted_ids_data.get("links", "[]"))
    276 276   if "website" in extracted_ids_data:
    277 277   links.append(extracted_ids_data["website"])
    278 278   results_info["ids_links"] = links
    skipped 177 lines
    456 456   logger,
    457 457   query_notify=None,
    458 458   proxy=None,
    459  - timeout=None,
     459 + timeout=3,
    460 460   is_parsing_enabled=False,
    461 461   id_type="username",
    462 462   debug=False,
    skipped 15 lines
    478 478   query results.
    479 479   logger -- Standard Python logger object.
    480 480   timeout -- Time in seconds to wait before timing out request.
    481  - Default is no timeout.
     481 + Default is 3 seconds.
    482 482   is_parsing_enabled -- Extract additional info from account pages.
    483 483   id_type -- Type of username to search.
    484 484   Default is 'username', see all supported here:
    skipped 250 lines
  • ■ ■ ■ ■ ■ ■
    maigret/resources/data.json
    skipped 13034 lines
    13035 13035   "us"
    13036 13036   ],
    13037 13037   "headers": {
    13038  - "authorization": "Bearer BQBeVMTwloR4yQEzyayWE7uYo1A4OHV3Oe3Uuv8nHCIJqj73fH6UOJoSfNbzqeSSfLXAFNABEUSHxTZmPe0"
     13038 + "authorization": "Bearer BQBFMMVu1dPwJPlnzUteNyF8xlZy7545QnhHizEHWEUQGQrRLznY5k9B9v7JdAsL-wU-Tcep51JTqBesKKY"
    13039 13039   },
    13040 13040   "errors": {
    13041 13041   "Spotify is currently not available in your country.": "Access denied in your country, use proxy/vpn"
    skipped 1421 lines
    14463 14463   "sec-ch-ua": "Google Chrome\";v=\"87\", \" Not;A Brand\";v=\"99\", \"Chromium\";v=\"87\"",
    14464 14464   "authorization": "Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA",
    14465 14465   "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
    14466  - "x-guest-token": "1393906084428107777"
     14466 + "x-guest-token": "1394397954526560260"
    14467 14467   },
    14468 14468   "errors": {
    14469 14469   "Bad guest token": "x-guest-token update required"
    skipped 400 lines
    14870 14870   "video"
    14871 14871   ],
    14872 14872   "headers": {
    14873  - "Authorization": "jwt eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MjExNjkwNDAsInVzZXJfaWQiOm51bGwsImFwcF9pZCI6NTg0NzksInNjb3BlcyI6InB1YmxpYyIsInRlYW1fdXNlcl9pZCI6bnVsbH0.uANToRPWBXHTZwnk-qucbJf-7ObHhCTwu87uJbEOj-I"
     14873 + "Authorization": "jwt eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MjEyODYyODAsInVzZXJfaWQiOm51bGwsImFwcF9pZCI6NTg0NzksInNjb3BlcyI6InB1YmxpYyIsInRlYW1fdXNlcl9pZCI6bnVsbH0.mxLdaOuP260WcxBvhadTTUQyn8t75pWNhTmtZLFS-W4"
    14874 14874   },
    14875 14875   "activation": {
    14876 14876   "url": "https://vimeo.com/_rv/viewer",
    skipped 929 lines
    15806 15806   "url": "https://yandex.ru/bugbounty/researchers/{username}/",
    15807 15807   "source": "Yandex",
    15808 15808   "usernameClaimed": "pyrk1",
    15809  - "usernameUnclaimed": "noonewouldeverusethis7"
     15809 + "usernameUnclaimed": "noonewouldeverusethis7",
     15810 + "disabled": true
    15810 15811   },
    15811 15812   "YandexCollections API": {
    15812 15813   "tags": [
    skipped 461 lines
    16274 16275   },
    16275 16276   "author.today": {
    16276 16277   "tags": [
    16277  - "ru"
     16278 + "ru",
     16279 + "reading"
    16278 16280   ],
    16279 16281   "checkType": "status_code",
    16280 16282   "alexaRank": 12218,
    skipped 11672 lines
  • ■ ■ ■ ■ ■
    maigret/utils.py
     1 +import ast
    1 2  import re
    2 3  import random
     4 +from typing import Any
    3 5   
    4 6   
    5 7  DEFAULT_USER_AGENTS = [
    skipped 59 lines
    65 67   return re.compile(regexp_str)
    66 68   
    67 69   
     70 +def ascii_data_display(data: str) -> Any:
     71 + return ast.literal_eval(data)
     72 + 
     73 + 
    68 74  def get_dict_ascii_tree(items, prepend="", new_line=True):
    69 75   text = ""
    70 76   for num, item in enumerate(items):
    skipped 4 lines
    75 81   if field_value.startswith("['"):
    76 82   is_last_item = num == len(items) - 1
    77 83   prepend_symbols = " " * 3 if is_last_item else " ┃ "
    78  - field_value = get_dict_ascii_tree(eval(field_value), prepend_symbols)
     84 + data = ascii_data_display(field_value)
     85 + field_value = get_dict_ascii_tree(data, prepend_symbols)
    79 86   text += f"\n{prepend}{box_symbol}{field_name}: {field_value}"
    80 87   else:
    81 88   text += f"\n{prepend}{box_symbol} {item}"
    skipped 10 lines
  • ■ ■ ■ ■ ■ ■
    test-requirements.txt
     1 +flake8==3.8.4
     2 +pytest==6.2.4
     3 +pytest-asyncio==0.14.0
     4 +pytest-cov==2.10.1
     5 +pytest-httpserver==1.0.0
     6 +pytest-rerunfailures==9.1.1
     7 + 
  • ■ ■ ■ ■ ■ ■
    tests/conftest.py
    skipped 11 lines
    12 12  CUR_PATH = os.path.dirname(os.path.realpath(__file__))
    13 13  JSON_FILE = os.path.join(CUR_PATH, '../maigret/resources/data.json')
    14 14  TEST_JSON_FILE = os.path.join(CUR_PATH, 'db.json')
     15 +LOCAL_TEST_JSON_FILE = os.path.join(CUR_PATH, 'local.json')
    15 16  empty_mark = Mark('', (), {})
    16 17   
    17 18   
    skipped 18 lines
    36 37   
    37 38  @pytest.fixture(scope='session')
    38 39  def default_db():
    39  - db = MaigretDatabase().load_from_file(JSON_FILE)
    40  - 
    41  - return db
     40 + return MaigretDatabase().load_from_file(JSON_FILE)
    42 41   
    43 42   
    44 43  @pytest.fixture(scope='function')
    45 44  def test_db():
    46  - db = MaigretDatabase().load_from_file(TEST_JSON_FILE)
     45 + return MaigretDatabase().load_from_file(TEST_JSON_FILE)
    47 46   
    48  - return db
     47 + 
     48 +@pytest.fixture(scope='function')
     49 +def local_test_db():
     50 + return MaigretDatabase().load_from_file(LOCAL_TEST_JSON_FILE)
    49 51   
    50 52   
    51 53  @pytest.fixture(autouse=True)
    skipped 7 lines
    59 61  def argparser():
    60 62   return setup_arguments_parser()
    61 63   
     64 + 
     65 +@pytest.fixture(scope="session")
     66 +def httpserver_listen_address():
     67 + return ("localhost", 8989)
     68 + 
  • ■ ■ ■ ■ ■ ■
    tests/local.json
     1 +{
     2 + "engines": {},
     3 + "sites": {
     4 + "StatusCode": {
     5 + "checkType": "status_code",
     6 + "url": "http://localhost:8989/url?id={username}",
     7 + "urlMain": "http://localhost:8989/",
     8 + "usernameClaimed": "claimed",
     9 + "usernameUnclaimed": "unclaimed"
     10 + },
     11 + "Message": {
     12 + "checkType": "message",
     13 + "url": "http://localhost:8989/url?id={username}",
     14 + "urlMain": "http://localhost:8989/",
     15 + "presenseStrs": ["user", "profile"],
     16 + "absenseStrs": ["not found", "404"],
     17 + "usernameClaimed": "claimed",
     18 + "usernameUnclaimed": "unclaimed"
     19 + }
     20 + }
     21 +}
  • ■ ■ ■ ■ ■
    tests/test_activation.py
    skipped 21 lines
    22 22  """
    23 23   
    24 24   
     25 +@pytest.mark.skip(reason="periodically fails")
    25 26  @pytest.mark.slow
    26 27  def test_twitter_activation(default_db):
    27 28   twitter_site = default_db.sites_dict['Twitter']
    skipped 29 lines
  • ■ ■ ■ ■ ■ ■
    tests/test_checking.py
     1 +from mock import Mock
     2 +import pytest
     3 + 
     4 +from maigret import search
     5 + 
     6 + 
     7 +def site_result_except(server, username, **kwargs):
     8 + query = f'id={username}'
     9 + server.expect_request('/url', query_string=query).respond_with_data(**kwargs)
     10 + 
     11 + 
     12 +@pytest.mark.asyncio
     13 +async def test_checking_by_status_code(httpserver, local_test_db):
     14 + sites_dict = local_test_db.sites_dict
     15 + 
     16 + site_result_except(httpserver, 'claimed', status=200)
     17 + site_result_except(httpserver, 'unclaimed', status=404)
     18 + 
     19 + result = await search('claimed', site_dict=sites_dict, logger=Mock())
     20 + assert result['StatusCode']['status'].is_found() is True
     21 + 
     22 + result = await search('unclaimed', site_dict=sites_dict, logger=Mock())
     23 + assert result['StatusCode']['status'].is_found() is False
     24 + 
     25 + 
     26 +@pytest.mark.asyncio
     27 +async def test_checking_by_message_positive_full(httpserver, local_test_db):
     28 + sites_dict = local_test_db.sites_dict
     29 + 
     30 + site_result_except(httpserver, 'claimed', response_data="user profile")
     31 + site_result_except(httpserver, 'unclaimed', response_data="404 not found")
     32 + 
     33 + result = await search('claimed', site_dict=sites_dict, logger=Mock())
     34 + assert result['Message']['status'].is_found() is True
     35 + 
     36 + result = await search('unclaimed', site_dict=sites_dict, logger=Mock())
     37 + assert result['Message']['status'].is_found() is False
     38 + 
     39 + 
     40 +@pytest.mark.asyncio
     41 +async def test_checking_by_message_positive_part(httpserver, local_test_db):
     42 + sites_dict = local_test_db.sites_dict
     43 + 
     44 + site_result_except(httpserver, 'claimed', response_data="profile")
     45 + site_result_except(httpserver, 'unclaimed', response_data="404")
     46 + 
     47 + result = await search('claimed', site_dict=sites_dict, logger=Mock())
     48 + assert result['Message']['status'].is_found() is True
     49 + 
     50 + result = await search('unclaimed', site_dict=sites_dict, logger=Mock())
     51 + assert result['Message']['status'].is_found() is False
     52 + 
     53 + 
     54 +@pytest.mark.asyncio
     55 +async def test_checking_by_message_negative(httpserver, local_test_db):
     56 + sites_dict = local_test_db.sites_dict
     57 + 
     58 + site_result_except(httpserver, 'claimed', response_data="")
     59 + site_result_except(httpserver, 'unclaimed', response_data="user 404")
     60 + 
     61 + result = await search('claimed', site_dict=sites_dict, logger=Mock())
     62 + assert result['Message']['status'].is_found() is False
     63 + 
     64 + result = await search('unclaimed', site_dict=sites_dict, logger=Mock())
     65 + assert result['Message']['status'].is_found() is True
     66 + 
  • ■ ■ ■ ■ ■
    tests/test_utils.py
    skipped 56 lines
    57 57   )
    58 58   
    59 59   
     60 +def test_url_extract_main_part_negative():
     61 + url_main_part = 'None'
     62 + assert URLMatcher.extract_main_part(url_main_part) == ''
     63 + 
     64 + 
    60 65  def test_url_extract_main_part():
    61 66   url_main_part = 'flickr.com/photos/alexaimephotography'
    62 67   
    skipped 72 lines
Please wait...
Page is in error, reload to recover