Projects STRLCPY maigret Commits 32000a1c
🤬
Revision indexing in progress... (symbol navigation in revisions will be accurate once indexing completes)
  • ■ ■ ■ ■ ■ ■
    maigret/maigret.py
    skipped 194 lines
    195 195   metavar="DB_FILE",
    196 196   dest="db_file",
    197 197   default=None,
    198  - help="Load Maigret database from a JSON file or an online, valid, JSON file.",
     198 + help="Load Maigret database from a JSON file or HTTP web resource.",
    199 199   )
    200 200   parser.add_argument(
    201 201   "--cookies-jar-file",
    skipped 317 lines
    519 519   )
    520 520   
    521 521   # Create object with all information about sites we are aware of.
    522  - db = MaigretDatabase().load_from_file(args.db_file)
     522 + db = MaigretDatabase().load_from_path(args.db_file)
    523 523   get_top_sites_for_id = lambda x: db.ranked_sites_dict(
    524 524   top=args.top_sites,
    525 525   tags=args.tags,
    skipped 194 lines
  • ■ ■ ■ ■ ■ ■
    maigret/report.py
    skipped 138 lines
    139 139   if dictionary["status"].status != QueryStatus.CLAIMED:
    140 140   continue
    141 141   
    142  - site_fallback_name = dictionary.get('url_user', f'{website_name}: {username.lower()}')
     142 + site_fallback_name = dictionary.get(
     143 + 'url_user', f'{website_name}: {username.lower()}'
     144 + )
    143 145   # site_node_name = dictionary.get('url_user', f'{website_name}: {username.lower()}')
    144 146   site_node_name = graph.add_node('site', site_fallback_name)
    145 147   graph.link(username_node_name, site_node_name)
    skipped 19 lines
    165 167   data_node_name = graph.add_node(vv, site_fallback_name)
    166 168   graph.link(list_node_name, data_node_name)
    167 169   
    168  - add_ids = {a: b for b, a in db.extract_ids_from_url(vv).items()}
     170 + add_ids = {
     171 + a: b for b, a in db.extract_ids_from_url(vv).items()
     172 + }
    169 173   if add_ids:
    170 174   process_ids(data_node_name, add_ids)
    171 175   else:
    172  - # value is just a string
     176 + # value is just a string
    173 177   # ids_data_name = f'{k}: {v}'
    174 178   # if ids_data_name == parent_node:
    175 179   # continue
    skipped 344 lines
  • ■ ■ ■ ■ ■
    maigret/sites.py
    skipped 291 lines
    292 292   return self
    293 293   
    294 294   def save_to_file(self, filename: str) -> "MaigretDatabase":
     295 + if '://' in filename:
     296 + return self
     297 + 
    295 298   db_data = {
    296 299   "sites": {site.name: site.strip_engine_data().json for site in self._sites},
    297 300   "engines": {engine.name: engine.json for engine in self._engines},
    skipped 46 lines
    344 347   
    345 348   return self.load_from_json(data)
    346 349   
    347  - def load_from_url(self, url: str) -> "MaigretDatabase":
    def load_from_path(self, path: str) -> "MaigretDatabase":
        """Load the Maigret database from a local file or a web resource.

        Dispatches on whether *path* contains a URL scheme separator:
        anything with '://' is fetched over HTTP, otherwise it is read
        from the filesystem.
        """
        # A scheme separator marks a remote resource; plain paths are local files.
        if '://' not in path:
            return self.load_from_file(path)
        return self.load_from_http(path)

     356 + def load_from_http(self, url: str) -> "MaigretDatabase":
    348 357   is_url_valid = url.startswith("http://") or url.startswith("https://")
    349 358   
    350 359   if not is_url_valid:
    skipped 49 lines
    400 409   
    401 410   return found_flags
    402 411   
    403  - 
    404 412   def extract_ids_from_url(self, url: str) -> dict:
    405 413   results = {}
    406 414   for s in self._sites:
    skipped 3 lines
    410 418   _id, _type = result
    411 419   results[_id] = _type
    412 420   return results
    413  - 
    414 421   
    415 422   def get_db_stats(self, sites_dict):
    416 423   if not sites_dict:
    skipped 37 lines
Please wait...
Page is in error, reload to recover