Projects STRLCPY snscrape Commits 14831d41
🤬
Revision indexing in progress... (symbol navigation in revisions will be accurate once indexing completes)
  • ■ ■ ■ ■ ■ ■
    socialmediascraper/modules/facebook.py
     1 +import bs4
     2 +import json
     3 +import logging
     4 +import re
     5 +import socialmediascraper.base
     6 +import urllib.parse
     7 + 
     8 + 
     9 +logger = logging.getLogger(__name__)
     10 + 
     11 + 
     12 +class FacebookUserScraper(socialmediascraper.base.Scraper):
     13 + name = 'facebook-user'
     14 + 
     15 + def __init__(self, username, **kwargs):
     16 + super().__init__(**kwargs)
     17 + self._username = username
     18 + 
     19 + def _soup_to_items(self, soup, username, baseUrl):
     20 + yielded = set()
     21 + for a in soup.find_all('a', href = re.compile(r'^/[^/]+/(posts|photos|videos)/[^/]*\d')):
     22 + href = a.get('href')
     23 + if href.startswith(f'/{username}/'):
     24 + link = urllib.parse.urljoin(baseUrl, href)
     25 + if link not in yielded:
     26 + yield link
     27 + yielded.add(link)
     28 + 
     29 + def get_items(self):
     30 + headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
     31 + 
     32 + nextPageLinkPattern = re.compile(r'^/pages_reaction_units/more/\?page_id=')
     33 + spuriousForLoopPattern = re.compile(r'^for \(;;\);')
     34 + 
     35 + logger.info('Retrieving initial data')
     36 + baseUrl = f'https://www.facebook.com/{self._username}/'
     37 + r = self._get(baseUrl, headers = headers)
     38 + if r.status_code == 404:
     39 + logger.warning('User does not exist')
     40 + return
     41 + elif r.status_code != 200:
     42 + logger.error('Got status code {r.status_code}')
     43 + return
     44 + soup = bs4.BeautifulSoup(r.text, 'lxml')
     45 + username = re.sub(r'^https://www\.facebook\.com/([^/]+)/$', r'\1', soup.find('link').get('href')) # Canonical capitalisation
     46 + baseUrl = f'https://www.facebook.com/{username}/'
     47 + yield from self._soup_to_items(soup, username, baseUrl)
     48 + nextPageLink = soup.find('a', ajaxify = nextPageLinkPattern)
     49 + 
     50 + while nextPageLink:
     51 + logger.info('Retrieving next page')
     52 + 
     53 + # The web app sends a bunch of additional parameters. Most of them would be easy to add, but there's also __dyn, which is a compressed list of the "modules" loaded in the browser.
     54 + # Reproducing that would be difficult to get right, especially as Facebook's codebase evolves, so it's just not sent at all here.
     55 + r = self._get(urllib.parse.urljoin(baseUrl, nextPageLink.get('ajaxify')) + '&__a=1', headers = headers)
     56 + if r.status_code != 200:
     57 + logger.error(f'Got status code {r.status_code}')
     58 + return
     59 + response = json.loads(spuriousForLoopPattern.sub('', r.text))
     60 + assert 'domops' in response
     61 + assert len(response['domops']) == 1
     62 + assert len(response['domops'][0]) == 4
     63 + assert response['domops'][0][0] == 'replace', f'{response["domops"][0]} is not "replace"'
     64 + assert response['domops'][0][1] == '#www_pages_reaction_see_more_unitwww_pages_home'
     65 + assert response['domops'][0][2] == False
     66 + assert '__html' in response['domops'][0][3]
     67 + soup = bs4.BeautifulSoup(response['domops'][0][3]['__html'], 'lxml')
     68 + yield from self._soup_to_items(soup, username, baseUrl)
     69 + nextPageLink = soup.find('a', ajaxify = nextPageLinkPattern)
     70 + 
     71 + @classmethod
     72 + def setup_parser(cls, subparser):
     73 + subparser.add_argument('username', help = 'A Facebook username or user ID')
     74 + 
     75 + @classmethod
     76 + def from_args(cls, args):
     77 + return cls(args.username, retries = args.retries)
     78 + 
Please wait...
Page is in error, reload to recover