| skipped 18 lines |
19 | 19 | | save_json_report |
20 | 20 | | from .sites import MaigretDatabase |
21 | 21 | | from .submit import submit_dialog |
| 22 | + | from .utils import get_dict_ascii_tree |
22 | 23 | | |
23 | 24 | | __version__ = '0.1.15' |
24 | 25 | | |
| skipped 193 lines |
218 | 219 | | print("Using the proxy: " + args.proxy) |
219 | 220 | | |
220 | 221 | | if args.parse_url: |
221 | | - | page, _ = parse(args.parse_url, cookies_str='') |
222 | | - | info = extract(page) |
223 | | - | text = 'Extracted ID data from webpage: ' + ', '.join([f'{a}: {b}' for a, b in info.items()]) |
224 | | - | print(text) |
225 | | - | for k, v in info.items(): |
226 | | - | if 'username' in k: |
227 | | - | usernames[v] = 'username' |
228 | | - | if k in supported_recursive_search_ids: |
229 | | - | usernames[v] = k |
| 222 | + | # url, headers |
| 223 | + | reqs = [(args.parse_url, set())] |
| 224 | + | try: |
| 225 | + | # temporary workaround for URL mutations MVP |
| 226 | + | from socid_extractor import mutate_url |
| 227 | + | reqs += list(mutate_url(args.parse_url)) |
| 228 | + |             except Exception:
| 229 | + | pass |
| 230 | + | |
| 231 | + | for req in reqs: |
| 232 | + | url, headers = req |
| 233 | + | print(f'Scanning webpage by URL {url}...') |
| 234 | + | page, _ = parse(url, cookies_str='', headers=headers) |
| 235 | + | info = extract(page) |
| 236 | + | if not info: |
| 237 | + | print('Nothing extracted') |
| 238 | + | else: |
| 239 | + | print(get_dict_ascii_tree(info.items(), new_line=False), ' ') |
| 240 | + | for k, v in info.items(): |
| 241 | + | if 'username' in k: |
| 242 | + | usernames[v] = 'username' |
| 243 | + | if k in supported_recursive_search_ids: |
| 244 | + | usernames[v] = k |
230 | 245 | | |
231 | 246 | | if args.tags: |
232 | 247 | | args.tags = list(set(str(args.tags).split(','))) |
| skipped 184 lines |