Projects STRLCPY maigret Commits 8c700b98
🤬
  • Added new sites through auto submit, some fixes

  • Loading...
  • Soxoj committed 4 years ago
    8c700b98
    1 parent f9c9af5f
Revision indexing in progress... (symbol navigation in revisions will be accurate after indexing completes)
  • ■ ■ ■ ■ ■ ■
    cookies.txt
    1 1  # HTTP Cookie File downloaded with cookies.txt by Genuinous @genuinous
    2 2  # This file can be used by wget, curl, aria2c and other standard compliant tools.
    3 3  # Usage Examples:
    4  -# 1) wget -x --load-cookies cookies.txt "https://xss.is/search/"
    5  -# 2) curl --cookie cookies.txt "https://xss.is/search/"
    6  -# 3) aria2c --load-cookies cookies.txt "https://xss.is/search/"
     4 +# 1) wget -x --load-cookies cookies.txt "https://pixabay.com/users/blue-156711/"
     5 +# 2) curl --cookie cookies.txt "https://pixabay.com/users/blue-156711/"
     6 +# 3) aria2c --load-cookies cookies.txt "https://pixabay.com/users/blue-156711/"
    7 7  #
    8  -xss.is FALSE / TRUE 0 xf_csrf PMnZNsr42HETwYEr
    9  -xss.is FALSE / TRUE 0 xf_from_search google
    10  -xss.is FALSE / TRUE 1642709308 xf_user 215268%2CZNKB_-64Wk-BOpsdtLYy-1UxfS5zGpxWaiEGUhmX
    11  -xss.is FALSE / TRUE 0 xf_session sGdxJtP_sKV0LCG8vUQbr6cL670_EFWM
    12  -.xss.is TRUE / FALSE 0 muchacho_cache ["00fbb0f2772c9596b0483d6864563cce"]
    13  -.xss.is TRUE / FALSE 0 muchacho_png ["00fbb0f2772c9596b0483d6864563cce"]
    14  -.xss.is TRUE / FALSE 0 muchacho_etag ["00fbb0f2772c9596b0483d6864563cce"]
    15  -.xss.is TRUE / FALSE 1924905600 2e66e4dd94a7a237d0d1b4d50f01e179_evc ["00fbb0f2772c9596b0483d6864563cce"]
     8 +.pixabay.com TRUE / TRUE 1618356838 __cfduid d56929cd50d11474f421b849df5758a881615764837
     9 +.pixabay.com TRUE / TRUE 1615766638 __cf_bm ea8f7c565b44d749f65500f0e45176cebccaeb09-1615764837-1800-AYJIXh2boDJ6HPf44JI9fnteWABHOVvkxiSccACP9EiS1E58UDTGhViXtqjFfVE0QRj1WowP4ss2DzCs+pW+qUc=
     10 +pixabay.com FALSE / FALSE 0 anonymous_user_id c1e4ee09-5674-4252-aa94-8c47b1ea80ab
     11 +pixabay.com FALSE / FALSE 1647214439 csrftoken vfetTSvIul7gBlURt6s985JNM18GCdEwN5MWMKqX4yI73xoPgEj42dbNefjGx5fr
     12 +pixabay.com FALSE / FALSE 1647300839 client_width 1680
     13 +pixabay.com FALSE / FALSE 748111764839 is_human 1
    16 14   
  • ■ ■ ■ ■
    maigret/maigret.py
    skipped 252 lines
    253 253   site_data = get_top_sites_for_id(args.id_type)
    254 254   
    255 255   if args.new_site_to_submit:
    256  - is_submitted = await submit_dialog(db, args.new_site_to_submit)
     256 + is_submitted = await submit_dialog(db, args.new_site_to_submit, args.cookie_file)
    257 257   if is_submitted:
    258 258   db.save_to_file(args.db_file)
    259 259   
    skipped 157 lines
  • maigret/resources/data.json
    Diff is too large to be displayed.
  • ■ ■ ■ ■ ■
    maigret/submit.py
    skipped 84 lines
    85 85   return changes
    86 86   
    87 87   
    88  -async def submit_dialog(db, url_exists):
     88 +async def submit_dialog(db, url_exists, cookie_file):
    89 89   domain_raw = URL_RE.sub('', url_exists).strip().strip('/')
    90 90   domain_raw = domain_raw.split('/')[0]
    91 91   
    skipped 15 lines
    107 107   url_user = url_exists.replace(supposed_username, '{username}')
    108 108   url_not_exists = url_exists.replace(supposed_username, non_exist_username)
    109 109   
    110  - a = requests.get(url_exists).text
    111  - b = requests.get(url_not_exists).text
     110 + # cookies
     111 + cookie_dict = None
     112 + if cookie_file:
     113 + cookie_jar = await import_aiohttp_cookies(cookie_file)
     114 + cookie_dict = {c.key: c.value for c in cookie_jar}
     115 + 
     116 + a = requests.get(url_exists, cookies=cookie_dict).text
     117 + b = requests.get(url_not_exists, cookies=cookie_dict).text
    112 118   
    113 119   tokens_a = set(a.split('"'))
    114 120   tokens_b = set(b.split('"'))
    skipped 59 lines
  • sites.md
    Diff is too large to be displayed.
  • ■ ■ ■ ■ ■
    utils/update_site_data.py
    skipped 19 lines
    20 20   '5000': '5K',
    21 21   '10000': '10K',
    22 22   '100000': '100K',
    23  - '10000000': '1M',
    24  - '50000000': '10M',
     23 + '10000000': '10M',
     24 + '50000000': '50M',
     25 + '100000000': '100M',
    25 26  })
    26 27   
    27 28  SEMAPHORE = threading.Semaphore(10)
    skipped 30 lines
    58 59  def get_step_rank(rank):
    59 60   def get_readable_rank(r):
    60 61   return RANKS[str(r)]
     62 + 
    61 63   valid_step_ranks = sorted(map(int, RANKS.keys()))
    62  - if rank == 0:
     64 + if rank == 0 or rank == sys.maxsize:
    63 65   return get_readable_rank(valid_step_ranks[-1])
    64 66   else:
    65 67   return get_readable_rank(list(filter(lambda x: x >= rank, valid_step_ranks))[0])
    skipped 7 lines
    73 75   help="JSON file with sites data to update.")
    74 76   
    75 77   parser.add_argument('--empty-only', help='update only sites without rating', action='store_true')
     78 + parser.add_argument('--exclude-engine', help='do not update score with certain engine',
     79 + action="append", dest="exclude_engine_list", default=[])
    76 80   
    77 81   pool = list()
    78 82   
    skipped 12 lines
    91 95   for site in sites_subset:
    92 96   url_main = site.url_main
    93 97   if site.alexa_rank < sys.maxsize and args.empty_only:
     98 + continue
     99 + if args.exclude_engine_list and site.engine in args.exclude_engine_list:
    94 100   continue
    95 101   site.alexa_rank = 0
    96 102   th = threading.Thread(target=get_rank, args=(url_main, site))
    skipped 37 lines
Please wait...
Page is in error, reload to recover