How to use the youtube_dl.utils.try_get function in youtube-dl

To help you get started, we’ve selected a few try_get examples, based on popular ways it is used in public projects.

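try_get(src, getter, expected_type=None) applies one or more getter callables to src, swallows the usual lookup errors (KeyError, IndexError, TypeError, AttributeError), and returns the first result that matches expected_type; otherwise it returns None. The sketch below is a minimal illustration of that call pattern, using a made-up payload rather than data from any of the projects shown:

from youtube_dl.compat import compat_str
from youtube_dl.utils import try_get

# Hypothetical payload, for illustration only
data = {'jsmods': {'instances': [['video', {'id': '123'}]]}}

# Returns the nested list, or None if any key is missing or the value
# is not a list (expected_type guards the type of the result).
instances = try_get(data, lambda x: x['jsmods']['instances'], list) or []

# Several getters can be passed as a tuple; the first one that succeeds
# and matches expected_type wins.
video_id = try_get(
    data,
    (lambda x: x['jsmods']['instances'][0][1]['id'],
     lambda x: x['video']['id']),
    compat_str)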

github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / facebook.py
server_js_data = self._parse_json(self._search_regex(
            r'handleServerJS\(({.+})(?:\);|,")', webpage,
            'server js data', default='{}'), video_id, fatal=False)

        if server_js_data:
            video_data = extract_video_data(server_js_data.get('instances', []))

        if not video_data:
            server_js_data = self._parse_json(
                self._search_regex(
                    r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+(?:stream_pagelet|pagelet_group_mall|permalink_video_pagelet)',
                    webpage, 'js data', default='{}'),
                video_id, transform_source=js_to_json, fatal=False)
            if server_js_data:
                video_data = extract_video_data(try_get(
                    server_js_data, lambda x: x['jsmods']['instances'],
                    list) or [])

        if not video_data:
            if not fatal_if_no_video:
                return webpage, False
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            elif '>You must log in to continue' in webpage:
                self.raise_login_required()
            else:
                raise ExtractorError('Cannot parse data')
github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / mtv.py
data_zone = self._search_regex(
            r'data-zone=(["\'])(?P<zone>.+?_lc_promo.*?)\1', webpage,
            'data zone', default=data_zone, group='zone')

        feed_url = try_get(
            triforce_feed, lambda x: x['manifest']['zones'][data_zone]['feed'],
            compat_str)
        if not feed_url:
            return

        feed = self._download_json(feed_url, video_id, fatal=False)
        if not feed:
            return

        return try_get(feed, lambda x: x['result']['data']['id'], compat_str)
github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / francetv.py
if (signed_url and isinstance(signed_url, compat_str) and
                        re.search(r'^(?:https?:)?//', signed_url)):
                    return signed_url
            return manifest_url

        is_live = None

        formats = []
        for video in info['videos']:
            if video['statut'] != 'ONLINE':
                continue
            video_url = video['url']
            if not video_url:
                continue
            if is_live is None:
                is_live = (try_get(
                    video, lambda x: x['plages_ouverture'][0]['direct'],
                    bool) is True) or '/live.francetv.fr/' in video_url
            format_id = video['format']
            ext = determine_ext(video_url)
            if ext == 'f4m':
                if georestricted:
                    # See https://github.com/rg3/youtube-dl/issues/3963
                    # m3u8 urls work fine
                    continue
                formats.extend(self._extract_f4m_formats(
                    sign(video_url, format_id) + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
                    video_id, f4m_id=format_id, fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    sign(video_url, format_id), video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id=format_id,
github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / bbc.py
}

        # Morph based embed (e.g. http://www.bbc.co.uk/sport/live/olympics/36895975)
        # Several setPayload calls may be present, but the video
        # always seems to be related to the first one
        morph_payload = self._parse_json(
            self._search_regex(
                r'Morph\.setPayload\([^,]+,\s*({.+?})\);',
                webpage, 'morph payload', default='{}'),
            playlist_id, fatal=False)
        if morph_payload:
            components = try_get(morph_payload, lambda x: x['body']['components'], list) or []
            for component in components:
                if not isinstance(component, dict):
                    continue
                lead_media = try_get(component, lambda x: x['props']['leadMedia'], dict)
                if not lead_media:
                    continue
                identifiers = lead_media.get('identifiers')
                if not identifiers or not isinstance(identifiers, dict):
                    continue
                programme_id = identifiers.get('vpid') or identifiers.get('playablePid')
                if not programme_id:
                    continue
                title = lead_media.get('title') or self._og_search_title(webpage)
                formats, subtitles = self._download_media_selector(programme_id)
                self._sort_formats(formats)
                description = lead_media.get('summary')
                uploader = lead_media.get('masterBrand')
                uploader_id = lead_media.get('mid')
                duration = None
                duration_d = lead_media.get('duration')
github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / instagram.py
def get_count(key, kind):
                    return int_or_none(try_get(
                        media, (lambda x: x['edge_media_%s' % key]['count'],
                                lambda x: x['%ss' % kind]['count'])))
                like_count = get_count('preview_like', 'like')
github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / instagram.py
def get_count(suffix):
            return int_or_none(try_get(
                node, lambda x: x['edge_media_' + suffix]['count']))
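
Both Instagram helpers above pair try_get with int_or_none, so a missing key or an unexpected structure degrades to None instead of raising. A self-contained sketch of that pattern, again with a hypothetical media node:

from youtube_dl.utils import int_or_none, try_get

# Hypothetical media node, for illustration only
media = {'edge_media_preview_like': {'count': 42}}

def get_count(key, kind):
    # Try the edge_media_* shaped key first, then the plain '%ss' key;
    # int_or_none turns a missing or non-numeric result into None.
    return int_or_none(try_get(
        media, (lambda x: x['edge_media_%s' % key]['count'],
                lambda x: x['%ss' % kind]['count'])))

like_count = get_count('preview_like', 'like')      # 42
comment_count = get_count('to_comment', 'comment')  # None (keys absent)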
github joegesualdo / get-youtube-subtitle-url-node / youtube-dl / youtube_dl / extractor / sohu.py
part_info = self._parse_json(self._download_webpage(
                        'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
                        video_id, download_note), video_id)

                    video_url = part_info['url']
                    cdnId = part_info.get('nid')

                    retries += 1
                    if retries > 5:
                        raise ExtractorError('Failed to get video URL')

                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'filesize': int_or_none(
                        try_get(data, lambda x: x['clipsBytes'][i])),
                    'width': int_or_none(data.get('width')),
                    'height': int_or_none(data.get('height')),
                    'fps': int_or_none(data.get('fps')),
                })
            self._sort_formats(formats)

            playlist.append({
                'id': '%s_part%d' % (video_id, i + 1),
                'title': title,
                'duration': vid_data['data']['clipsDuration'][i],
                'formats': formats,
            })

        if len(playlist) == 1:
            info = playlist[0]
            info['id'] = video_id