How to use the granary.source.FRIENDS constant in granary

To help you get started, we’ve selected a few granary examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github snarfed / granary / granary / twitter.py View on Github external
group_id can be used to specify the slug of a list for which to return tweets.
    By default the current API user’s lists will be used, but lists owned by other
    users can be fetched by explicitly passing a username to user_id, e.g. to
    fetch tweets from the list @exampleuser/example-list you would call
    get_activities(user_id='exampleuser', group_id='example-list').

    Twitter replies default to including a mention of the user they're replying
    to, which overloads mentions a bit. When fetch_mentions is True, we determine
    that a tweet mentions the current user if it @-mentions their username and:

    * it's not a reply, OR
    * it's a reply, but not to the current user, AND
      * the tweet it's replying to doesn't @-mention the current user
    """
    if group_id is None:
      group_id = source.FRIENDS

    if user_id and user_id.startswith('@'):
      user_id = user_id[1:]

    # nested function for lazily fetching the user object if we need it
    user = []
    def _user():
      if not user:
        user.append(self.urlopen(API_USER % user_id if user_id else API_CURRENT_USER))
      return user[0]

    if count:
      count += start_index

    activities = []
    if activity_id:
github snarfed / granary / granary / twitter.py View on Github external
'count': count,
          'screen_name': user_id,
        }

        if fetch_likes:
          liked = self.urlopen(API_FAVORITES % user_id)
          if liked:
            activities += [self._make_like(tweet, _user()) for tweet in liked]
      elif group_id == source.SEARCH:
        if not search_query:
          raise ValueError('search requires search_query parameter')
        url = API_SEARCH % {
          'q': urllib.parse.quote_plus(search_query.encode('utf-8')),
          'count': count,
        }
      elif group_id in (source.FRIENDS, source.ALL):
        url = API_TIMELINE % (count)
      else:
        if not user_id:
          user_id = _user().get('screen_name')
        url = API_LIST_TIMELINE % {
          'count': count,
          'slug': group_id,
          'owner_screen_name': user_id,
        }

      headers = {'If-None-Match': etag} if etag else {}
      total_count = None
      try:
        resp = self.urlopen(url, headers=headers, parse_response=False)
        etag = resp.info().get('ETag')
        tweet_obj = source.load_json(resp.read(), url)
github snarfed / granary / granary / flickr.py View on Github external
if group_id is None:
      group_id = source.FRIENDS

    params = {}
    method = None

    if activity_id:
      params['photo_id'] = activity_id
      method = 'flickr.photos.getInfo'
    else:
      params['extras'] = self.API_EXTRAS
      params['per_page'] = 50
      if group_id == source.SELF:
        params['user_id'] = user_id
        method = 'flickr.people.getPhotos'
      if group_id == source.FRIENDS:
        method = 'flickr.photos.getContactsPhotos'
      if group_id == source.ALL:
        method = 'flickr.photos.getRecent'

    if not method:
      raise NotImplementedError()

    photos_resp = self.call_api_method(method, params)

    result = {'items': []}
    if activity_id:
      photos = [photos_resp.get('photo', {})]
    else:
      photos = photos_resp.get('photos', {}).get('photo', [])

    for photo in photos:
github snarfed / granary / granary / twitter.py View on Github external
'count': count,
          'screen_name': user_id,
        }

        if fetch_likes:
          liked = self.urlopen(API_FAVORITES % user_id)
          if liked:
            activities += [self._make_like(tweet, _user()) for tweet in liked]
      elif group_id == source.SEARCH:
        if not search_query:
          raise ValueError('search requires search_query parameter')
        url = API_SEARCH % {
          'q': urllib.parse.quote_plus(search_query.encode('utf-8')),
          'count': count,
        }
      elif group_id in (source.FRIENDS, source.ALL):
        url = API_TIMELINE % (count)
      else:
        if not user_id:
          user_id = _user().get('screen_name')
        url = API_LIST_TIMELINE % {
          'count': count,
          'slug': group_id,
          'owner_screen_name': user_id,
        }

      headers = {'If-None-Match': etag} if etag else {}
      total_count = None
      try:
        resp = self.urlopen(url, headers=headers, parse_response=False)
        etag = resp.info().get('ETag')
        tweet_obj = source.load_json(resp.read(), url)
github snarfed / granary / granary / flickr.py View on Github external
activity_id=None, start_index=0, count=0,
                              etag=None, min_id=None, cache=None,
                              fetch_replies=False, fetch_likes=False,
                              fetch_shares=False, fetch_events=False,
                              fetch_mentions=False, search_query=None, **kwargs):
    """Fetches Flickr photos and converts them to ActivityStreams activities.

    See method docstring in source.py for details.

    Mentions are not fetched or included because they don't exist in Flickr.
    https://github.com/snarfed/bridgy/issues/523#issuecomment-155523875
    """
    if user_id is None:
      user_id = 'me'
    if group_id is None:
      group_id = source.FRIENDS

    params = {}
    method = None

    if activity_id:
      params['photo_id'] = activity_id
      method = 'flickr.photos.getInfo'
    else:
      params['extras'] = self.API_EXTRAS
      params['per_page'] = 50
      if group_id == source.SELF:
        params['user_id'] = user_id
        method = 'flickr.people.getPhotos'
      if group_id == source.FRIENDS:
        method = 'flickr.photos.getContactsPhotos'
      if group_id == source.ALL:
github snarfed / granary / granary / instagram.py View on Github external
'Instagram only supports search over hashtags, so search_query must '
          'begin with the # character.')

    # TODO: paging
    media = []
    kwargs = {}
    if min_id is not None:
      kwargs['min_id'] = min_id

    activities = []
    try:
      media_url = (API_MEDIA_URL % activity_id if activity_id else
                   API_USER_MEDIA_URL % user_id if group_id == source.SELF else
                   API_MEDIA_POPULAR_URL if group_id == source.ALL else
                   API_MEDIA_SEARCH_URL % search_query if group_id == source.SEARCH else
                   API_USER_FEED_URL if group_id == source.FRIENDS else None)
      assert media_url
      media = self.urlopen(util.add_query_params(media_url, kwargs))
      if media:
        if activity_id:
          media = [media]
        activities += [self.media_to_activity(m) for m in util.trim_nulls(media)]

      if group_id == source.SELF and fetch_likes:
        # add the user's own likes
        liked = self.urlopen(
          util.add_query_params(API_USER_LIKES_URL % user_id, kwargs))
        if liked:
          user = self.urlopen(API_USER_URL % user_id)
          activities += [self.like_to_object(user, l['id'], l['link'])
                         for l in liked]
github snarfed / granary / api.py View on Github external
rss,
  source,
  twitter,
)

# %s placeholder is filled with the serialized body.
# NOTE(review): the template looks like it should begin with an XML prolog
# (e.g. <?xml ...?>) that was lost when this snippet was extracted -- TODO
# confirm against the original api.py.
XML_TEMPLATE = """\

%s
"""
# paging bounds for API responses: hard cap and default page size
ITEMS_PER_PAGE_MAX = 100
ITEMS_PER_PAGE_DEFAULT = 10
# how long a response may be served from cache before re-fetching
RESPONSE_CACHE_TIME = datetime.timedelta(minutes=10)

# default values for each part of the API request path except the site, e.g.
# /twitter/@me/@self/@all/...
PATH_DEFAULTS = ((source.ME,), (source.ALL, source.FRIENDS), (source.APP,), ())
# one extra path component for the site itself, which has no default
MAX_PATH_LEN = len(PATH_DEFAULTS) + 1

# map granary format name to MIME type. list of official MIME types:
# https://www.iana.org/assignments/media-types/media-types.xhtml
FORMATS = {
  'activitystreams': 'application/stream+json',
  'as1': 'application/stream+json',
  'as1-xml': 'application/xml',
  'as2': 'application/activity+json',
  'atom': 'application/atom+xml',
  'html': 'text/html',
  'json': 'application/json',
  'json-mf2': 'application/mf2+json',
  'jsonfeed': 'application/json',
  'mf2-json': 'application/mf2+json',
  'rss': 'application/rss+xml',
github snarfed / granary / granary / instagram.py View on Github external
scrape: if True, scrapes HTML from instagram.com instead of using the API.
        Populates the user's actor object in the 'actor' response field.
        Useful for apps that haven't yet been approved in the new permissions
        approval process. Currently only supports group_id=SELF. Also supports
        passing a shortcode as activity_id as well as the internal API id.
        http://developers.instagram.com/post/133424514006/instagram-platform-update
      cookie: string, only used if scrape=True
      ignore_rate_limit: boolean, for scraping, always make an HTTP request,
        even if we've been rate limited recently
      **: see :meth:`Source.get_activities_response`

    Raises:
      InstagramAPIError
    """
    if group_id is None:
      group_id = source.FRIENDS

    if scrape or self.scrape:
      cookie = cookie or self.cookie
      if not (activity_id or
              (group_id == source.SELF and user_id) or
              (group_id == source.FRIENDS and cookie)):
        raise NotImplementedError(
          'Scraping only supports activity_id, user_id and group_id=@self, or cookie and group_id=@friends.')
      elif fetch_likes and not cookie:
        raise NotImplementedError('Scraping likes requires a cookie.')

      # cache rate limited responses and short circuit
      global _last_rate_limited, _last_rate_limited_exc
      now = datetime.datetime.now()
      if not ignore_rate_limit and _last_rate_limited:
        retry = _last_rate_limited + RATE_LIMIT_BACKOFF
github snarfed / instagram-atom / cookie.py View on Github external
def get(self):
    cookie = 'sessionid=%s' % urllib.parse.quote(
      util.get_required_param(self, 'sessionid').encode('utf-8'))
    logging.info('Fetching with Cookie: %s', cookie)

    host_url = self.request.host_url + '/'
    ig = instagram.Instagram()
    try:
      resp = ig.get_activities_response(group_id=source.FRIENDS, scrape=True,
                                        cookie=cookie)
    except Exception as e:
      status, text = util.interpret_http_exception(e)
      if status in ('403',):
        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.response.out.write(atom.activities_to_atom([{
          'object': {
            'url': self.request.url,
            'content': 'Your instagram-atom cookie isn\'t working. <a href="%s">Click here to regenerate your feed!</a>' % host_url,
            },
          }], {}, title='instagram-atom', host_url=host_url,
          request_url=self.request.path_url))
        return
      elif status == '401':
        # IG returns 401 sometimes as a form of rate limiting or bot detection
        self.response.status = '429'
github snarfed / granary / granary / mastodon.py View on Github external
"""
    if user_id and group_id in (source.FRIENDS, source.ALL):
      raise ValueError("Mastodon doesn't support group_id %s with user_id" % group_id)

    if not user_id:
      user_id = self.user_id
    if fetch_events:
      raise NotImplementedError()

    params = {}
    if count:
      params['limit'] = count + start_index

    if activity_id:
      statuses = [self._get(API_STATUS % activity_id)]
    elif group_id in (None, source.FRIENDS):
      statuses = self._get(API_TIMELINE, params=params)
    elif group_id == source.SEARCH:
      if not search_query:
        raise ValueError('search requires search_query parameter')
      statuses = self._get(API_SEARCH, params={
        'q': search_query,
        'resolve': True,
        'offset': start_index,
        'limit': count if count else '',
      }).get('statuses', [])
    else:  # eg group_id SELF
      statuses = self._get(API_ACCOUNT_STATUSES % user_id, params=params)

    activities = []

    # batch get memcached counts of favorites and retweets for all tweets