Projects STRLCPY pentest-tools Commits a9af5c99
🤬
  • ■ ■ ■ ■
    extract-endpoints.php
    skipped 591 lines
    592 592   if( isset($parse['path']) && is_array($_ignore) && count($_ignore) ) {
    593 593   $p = strrpos( $parse['path'], '.' );
    594 594   if( $p !== false ) {
    595  - $ext = substr( $parse['path'], $p+1 );
     595 + $ext = substr( $parse['path'], $p+1 );
    596 596   if( in_array($ext,$_ignore) ) {
    597 597   unset( $t_all[$k] );
    598 598   continue;
    skipped 68 lines
  • ■ ■ ■ ■ ■ ■
    favicon-hashtrick.py
    skipped 14 lines
    15 15  from colored import fg, bg, attr
    16 16   
    17 17   
     18 +# disable "InsecureRequestWarning: Unverified HTTPS request is being made."
     19 +from requests.packages.urllib3.exceptions import InsecureRequestWarning
     20 +requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
     21 + 
     22 + 
    18 23  def banner():
    19 24   print("""
    20 25   __ _ _ _ _ _ _
    skipped 8 lines
    29 34  """)
    30 35   pass
    31 36   
    32  -banner()
    33  - 
    34 37   
    35 38  def faviconHash( data, web ):
    36 39   if web:
    skipped 15 lines
    52 55   
    53 56  parser.parse_args()
    54 57  args = parser.parse_args()
     58 + 
     59 +if not args.silent:
     60 + banner()
    55 61   
    56 62  if args.values:
    57 63   t_values = args.values.split(',')
    skipped 19 lines
    77 83   web_src = False
    78 84   
    79 85  if args.favurl:
    80  - favsource = args.favurl
    81  - r = requests.get( favsource )
    82  - data = r.content
    83  - web_src = True
     86 + favsource = args.favurl
     87 + try:
     88 + r = requests.get( favsource, timeout=3, verify=False )
     89 + except Exception as e:
     90 + sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
     91 + exit()
     92 + data = r.content
     93 + web_src = True
    84 94   
    85 95  if not args.favfile64 and not args.favfile and not args.favurl:
    86 96   parser.error( 'missing favicon' )
    skipped 38 lines
  • ■ ■ ■ ■ ■ ■
    goop/__init__.py
# goop package version string (single-source version for setup metadata).
__version__ = '0.1.1'
     2 + 
  • ■ ■ ■ ■ ■ ■
    goop/goop.py
     1 +import re
     2 +import requests
     3 + 
     4 +try:
     5 + from urllib.parse import quote_plus as url_encode
     6 +except ImportError:
     7 + from urllib import quote_plus as url_encode
     8 + 
     9 +def decode_html(string):
     10 + "decode common html/xml entities"
     11 + new_string = string
     12 + decoded = ['>', '<', '"', '&', '\'']
     13 + encoded = ['&gt;', '&lt;', '&quot;', '&amp;', '&#039;']
     14 + for e, d in zip(encoded, decoded):
     15 + new_string = new_string.replace(e, d)
     16 + for e, d in zip(encoded[::-1], decoded[::-1]):
     17 + new_string = new_string.replace(e, d)
     18 + return new_string
     19 + 
     20 +def parse(string):
     21 + "extract and parse resutls"
     22 + parsed = {}
     23 +# pattern = r'''<div><div class="[^"]+">
     24 +# <div class="[^"]+"><a href="/url\?q=(.+?)&sa=[^"]+"><div class="[^"]+">(.*?)</div>
     25 +# <div class="[^"]+">.*?</div></a></div>
     26 +# <div class="[^"]+"></div>
     27 +# <div class="[^"]+"><div><div class="[^"]+"><div><div><div class="[^"]+">(?:(.*?)(?: ...)?</div>|\n<span class="[^"]+">.*?</span><span class="[^"]+">.*?</span>(.*?)</div>)'''
     28 + pattern = r'''<div class="[^"]+"><a href="/url\?q=(.+?)&sa=[^"]+">'''
     29 + matches = re.finditer(pattern, string)
     30 + num = 0
     31 + for match in matches:
     32 + # parsed[num] = {'url' : match.group(1), 'text' : match.group(2), 'summary' : match.group(3) or match.group(4)}
     33 + parsed[num] = {'url' : match.group(1), 'text' : '', 'summary' : ''}
     34 + num += 1
     35 + return parsed
     36 + 
     37 +def search(query, cookie, page=0, full=False):
     38 + """
     39 + main function, returns parsed results
     40 + Args:
     41 + query - search string
     42 + cookie - facebook cookie
     43 + page - search result page number (optional)
     44 + """
     45 + offset = page * 10
     46 + filter = 1 if not full else 0
     47 + escaped = url_encode('https://google.com/search?q=%s&start=%i&filter=%i' % (url_encode(query), offset, filter))
     48 + headers = {
     49 + 'Host': 'developers.facebook.com',
     50 + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
     51 + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
     52 + 'Accept-Language': 'en-US,en;q=0.5',
     53 + 'Accept-Encoding': 'deflate',
     54 + 'Connection': 'keep-alive',
     55 + 'Cookie': cookie,
     56 + 'Upgrade-Insecure-Requests': '1',
     57 + 'Cache-Control': 'max-age=0',
     58 + 'TE': 'Trailers'
     59 + }
     60 + response = requests.get('https://developers.facebook.com/tools/debug/echo/?q=%s' % escaped, headers=headers)
     61 + cleaned_response = decode_html(response.text)
     62 + parsed = parse(cleaned_response)
     63 + return parsed
     64 + 
  • ■ ■ ■ ■
    quickhits.py
    skipped 92 lines
    93 93   # url = url.strip('_')
    94 94   
    95 95   match = title_regexp.search( r.text )
    96  - title = match.group(1) if match else '-'
     96 + title = match.group(1).strip() if match else '-'
    97 97   
    98 98   ljust = 100
    99 99   while ljust < len(url):
    skipped 277 lines
Please wait...
Page is in error, reload to recover