zip
stringlengths
19
109
filename
stringlengths
4
185
contents
stringlengths
0
30.1M
type_annotations
listlengths
0
1.97k
type_annotation_starts
listlengths
0
1.97k
type_annotation_ends
listlengths
0
1.97k
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/teamtreehouse.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, ExtractorError, float_or_none, get_element_by_class, get_element_by_id, parse_duration, remove_end, urlencode_postdata, urljoin, ) class TeamTreeHouseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?teamtreehouse\.com/library/(?P<id>[^/]+)' _TESTS = [{ # Course 'url': 'https://teamtreehouse.com/library/introduction-to-user-authentication-in-php', 'info_dict': { 'id': 'introduction-to-user-authentication-in-php', 'title': 'Introduction to User Authentication in PHP', 'description': 'md5:405d7b4287a159b27ddf30ca72b5b053', }, 'playlist_mincount': 24, }, { # WorkShop 'url': 'https://teamtreehouse.com/library/deploying-a-react-app', 'info_dict': { 'id': 'deploying-a-react-app', 'title': 'Deploying a React App', 'description': 'md5:10a82e3ddff18c14ac13581c9b8e5921', }, 'playlist_mincount': 4, }, { # Video 'url': 'https://teamtreehouse.com/library/application-overview-2', 'info_dict': { 'id': 'application-overview-2', 'ext': 'mp4', 'title': 'Application Overview', 'description': 'md5:4b0a234385c27140a4378de5f1e15127', }, 'expected_warnings': ['This is just a preview'], }] _NETRC_MACHINE = 'teamtreehouse' def _real_initialize(self): email, password = self._get_login_info() if email is None: return signin_page = self._download_webpage( 'https://teamtreehouse.com/signin', None, 'Downloading signin page') data = self._form_hidden_inputs('new_user_session', signin_page) data.update({ 'user_session[email]': email, 'user_session[password]': password, }) error_message = get_element_by_class('error-message', self._download_webpage( 'https://teamtreehouse.com/person_session', None, 'Logging in', data=urlencode_postdata(data))) if error_message: raise ExtractorError(clean_html(error_message), expected=True) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = 
self._html_search_meta(['og:title', 'twitter:title'], webpage) description = self._html_search_meta( ['description', 'og:description', 'twitter:description'], webpage) entries = self._parse_html5_media_entries(url, webpage, display_id) if entries: info = entries[0] for subtitles in info.get('subtitles', {}).values(): for subtitle in subtitles: subtitle['ext'] = determine_ext(subtitle['url'], 'srt') is_preview = 'data-preview="true"' in webpage if is_preview: self.report_warning( 'This is just a preview. You need to be signed in with a Basic account to download the entire video.', display_id) duration = 30 else: duration = float_or_none(self._search_regex( r'data-duration="(\d+)"', webpage, 'duration'), 1000) if not duration: duration = parse_duration(get_element_by_id( 'video-duration', webpage)) info.update({ 'id': display_id, 'title': title, 'description': description, 'duration': duration, }) return info else: def extract_urls(html, extract_info=None): for path in re.findall(r'<a[^>]+href="([^"]+)"', html): page_url = urljoin(url, path) entry = { '_type': 'url_transparent', 'id': self._match_id(page_url), 'url': page_url, 'id_key': self.ie_key(), } if extract_info: entry.update(extract_info) entries.append(entry) workshop_videos = self._search_regex( r'(?s)<ul[^>]+id="workshop-videos"[^>]*>(.+?)</ul>', webpage, 'workshop videos', default=None) if workshop_videos: extract_urls(workshop_videos) else: stages_path = self._search_regex( r'(?s)<div[^>]+id="syllabus-stages"[^>]+data-url="([^"]+)"', webpage, 'stages path') if stages_path: stages_page = self._download_webpage( urljoin(url, stages_path), display_id, 'Downloading stages page') for chapter_number, (chapter, steps_list) in enumerate(re.findall(r'(?s)<h2[^>]*>\s*(.+?)\s*</h2>.+?<ul[^>]*>(.+?)</ul>', stages_page), 1): extract_urls(steps_list, { 'chapter': chapter, 'chapter_number': chapter_number, }) title = remove_end(title, ' Course') return self.playlist_result( entries, display_id, title, description)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/techtalks.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( get_element_by_attribute, clean_html, ) class TechTalksIE(InfoExtractor): _VALID_URL = r'https?://techtalks\.tv/talks/(?:[^/]+/)?(?P<id>\d+)' _TESTS = [{ 'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/', 'info_dict': { 'id': '57758', 'title': 'Learning Topic Models --- Going beyond SVD', }, 'playlist': [ { 'info_dict': { 'id': '57758', 'ext': 'flv', 'title': 'Learning Topic Models --- Going beyond SVD', }, }, { 'info_dict': { 'id': '57758-slides', 'ext': 'flv', 'title': 'Learning Topic Models --- Going beyond SVD', }, }, ], 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://techtalks.tv/talks/57758', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) talk_id = mobj.group('id') webpage = self._download_webpage(url, talk_id) rtmp_url = self._search_regex( r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url') play_path = self._search_regex( r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"', webpage, 'presenter play path') title = clean_html(get_element_by_attribute('class', 'title', webpage)) video_info = { 'id': talk_id, 'title': title, 'url': rtmp_url, 'play_path': play_path, 'ext': 'flv', } m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage) if m_slides is None: return video_info else: return { '_type': 'playlist', 'id': talk_id, 'title': title, 'entries': [ video_info, # The slides video { 'id': talk_id + '-slides', 'title': title, 'url': rtmp_url, 'play_path': m_slides.group(1), 'ext': 'flv', }, ], }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ted.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse ) from ..utils import ( extract_attributes, float_or_none, int_or_none, try_get, url_or_none, ) class TEDIE(InfoExtractor): IE_NAME = 'ted' _VALID_URL = r'''(?x) (?P<proto>https?://) (?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/ ( (?P<type_playlist>playlists(?:/(?P<playlist_id>\d+))?) # We have a playlist | ((?P<type_talk>talks)) # We have a simple talk | (?P<type_watch>watch)/[^/]+/[^/]+ ) (/lang/(.*?))? # The url may contain the language /(?P<name>[\w-]+) # Here goes the name and then ".html" .*)$ ''' _TESTS = [{ 'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html', 'md5': 'b0ce2b05ca215042124fbc9e3886493a', 'info_dict': { 'id': '102', 'ext': 'mp4', 'title': 'The illusion of consciousness', 'description': ('Philosopher Dan Dennett makes a compelling ' 'argument that not only don\'t we understand our own ' 'consciousness, but that half the time our brains are ' 'actively fooling us.'), 'uploader': 'Dan Dennett', 'width': 853, 'duration': 1308, 'view_count': int, 'comment_count': int, 'tags': list, }, 'params': { 'skip_download': True, }, }, { # missing HTTP bitrates 'url': 'https://www.ted.com/talks/vishal_sikka_the_beauty_and_power_of_algorithms', 'info_dict': { 'id': '6069', 'ext': 'mp4', 'title': 'The beauty and power of algorithms', 'thumbnail': r're:^https?://.+\.jpg', 'description': 'md5:734e352710fb00d840ab87ae31aaf688', 'uploader': 'Vishal Sikka', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best', 'md5': 'e6b9617c01a7970ceac8bb2c92c346c0', 'info_dict': { 'id': '1972', 'ext': 'mp4', 'title': 'Be passionate. Be courageous. 
Be your best.', 'uploader': 'Gabby Giffords and Mark Kelly', 'description': 'md5:5174aed4d0f16021b704120360f72b92', 'duration': 1128, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ted.com/playlists/who_are_the_hackers', 'info_dict': { 'id': '10', 'title': 'Who are the hackers?', 'description': 'md5:49a0dbe8fb76d81a0e64b4a80af7f15a' }, 'playlist_mincount': 6, }, { # contains a youtube video 'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything', 'add_ie': ['Youtube'], 'info_dict': { 'id': '_ZG8HBuDjgc', 'ext': 'webm', 'title': 'Douglas Adams: Parrots the Universe and Everything', 'description': 'md5:01ad1e199c49ac640cb1196c0e9016af', 'uploader': 'University of California Television (UCTV)', 'uploader_id': 'UCtelevision', 'upload_date': '20080522', }, 'params': { 'skip_download': True, }, }, { # no nativeDownloads 'url': 'https://www.ted.com/talks/tom_thum_the_orchestra_in_my_mouth', 'info_dict': { 'id': '1792', 'ext': 'mp4', 'title': 'The orchestra in my mouth', 'description': 'md5:5d1d78650e2f8dfcbb8ebee2951ac29a', 'uploader': 'Tom Thum', 'view_count': int, 'comment_count': int, 'tags': list, }, 'params': { 'skip_download': True, }, }] _NATIVE_FORMATS = { 'low': {'width': 320, 'height': 180}, 'medium': {'width': 512, 'height': 288}, 'high': {'width': 854, 'height': 480}, } def _extract_info(self, webpage): info_json = self._search_regex( r'(?s)q\(\s*"\w+.init"\s*,\s*({.+?})\)\s*</script>', webpage, 'info json') return json.loads(info_json) def _real_extract(self, url): m = re.match(self._VALID_URL, url, re.VERBOSE) if m.group('type').startswith('embed'): desktop_url = m.group('proto') + 'www' + m.group('urlmain') return self.url_result(desktop_url, 'TED') name = m.group('name') if m.group('type_talk'): return self._talk_info(url, name) elif m.group('type_watch'): return self._watch_info(url, name) else: return self._playlist_videos_info(url, name) def _playlist_videos_info(self, url, name): '''Returns the videos of 
the playlist''' webpage = self._download_webpage(url, name, 'Downloading playlist webpage') playlist_entries = [] for entry in re.findall(r'(?s)<[^>]+data-ga-context=["\']playlist["\'][^>]*>', webpage): attrs = extract_attributes(entry) entry_url = compat_urlparse.urljoin(url, attrs['href']) playlist_entries.append(self.url_result(entry_url, self.ie_key())) final_url = self._og_search_url(webpage, fatal=False) playlist_id = ( re.match(self._VALID_URL, final_url).group('playlist_id') if final_url else None) return self.playlist_result( playlist_entries, playlist_id=playlist_id, playlist_title=self._og_search_title(webpage, fatal=False), playlist_description=self._og_search_description(webpage)) def _talk_info(self, url, video_name): webpage = self._download_webpage(url, video_name) info = self._extract_info(webpage) data = try_get(info, lambda x: x['__INITIAL_DATA__'], dict) or info talk_info = data['talks'][0] title = talk_info['title'].strip() downloads = talk_info.get('downloads') or {} native_downloads = downloads.get('nativeDownloads') or talk_info.get('nativeDownloads') or {} formats = [{ 'url': format_url, 'format_id': format_id, } for (format_id, format_url) in native_downloads.items() if format_url is not None] subtitled_downloads = downloads.get('subtitledDownloads') or {} for lang, subtitled_download in subtitled_downloads.items(): for q in self._NATIVE_FORMATS: q_url = subtitled_download.get(q) if not q_url: continue formats.append({ 'url': q_url, 'format_id': '%s-%s' % (q, lang), 'language': lang, }) if formats: for f in formats: finfo = self._NATIVE_FORMATS.get(f['format_id'].split('-')[0]) if finfo: f.update(finfo) player_talk = talk_info['player_talks'][0] external = player_talk.get('external') if isinstance(external, dict): service = external.get('service') if isinstance(service, compat_str): ext_url = None if service.lower() == 'youtube': ext_url = external.get('code') return self.url_result(ext_url or external['uri']) resources_ = 
player_talk.get('resources') or talk_info.get('resources') http_url = None for format_id, resources in resources_.items(): if format_id == 'hls': if not isinstance(resources, dict): continue stream_url = url_or_none(resources.get('stream')) if not stream_url: continue formats.extend(self._extract_m3u8_formats( stream_url, video_name, 'mp4', m3u8_id=format_id, fatal=False)) else: if not isinstance(resources, list): continue if format_id == 'h264': for resource in resources: h264_url = resource.get('file') if not h264_url: continue bitrate = int_or_none(resource.get('bitrate')) formats.append({ 'url': h264_url, 'format_id': '%s-%sk' % (format_id, bitrate), 'tbr': bitrate, }) if re.search(r'\d+k', h264_url): http_url = h264_url elif format_id == 'rtmp': streamer = talk_info.get('streamer') if not streamer: continue for resource in resources: formats.append({ 'format_id': '%s-%s' % (format_id, resource.get('name')), 'url': streamer, 'play_path': resource['file'], 'ext': 'flv', 'width': int_or_none(resource.get('width')), 'height': int_or_none(resource.get('height')), 'tbr': int_or_none(resource.get('bitrate')), }) m3u8_formats = list(filter( lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none', formats)) if http_url: for m3u8_format in m3u8_formats: bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None) if not bitrate: continue bitrate_url = re.sub(r'\d+k', bitrate, http_url) if not self._is_valid_url( bitrate_url, video_name, '%s bitrate' % bitrate): continue f = m3u8_format.copy() f.update({ 'url': bitrate_url, 'format_id': m3u8_format['format_id'].replace('hls', 'http'), 'protocol': 'http', }) if f.get('acodec') == 'none': del f['acodec'] formats.append(f) audio_download = talk_info.get('audioDownload') if audio_download: formats.append({ 'url': audio_download, 'format_id': 'audio', 'vcodec': 'none', }) self._sort_formats(formats) video_id = compat_str(talk_info['id']) return { 'id': video_id, 'title': title, 'uploader': 
player_talk.get('speaker') or talk_info.get('speaker'), 'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'), 'description': self._og_search_description(webpage), 'subtitles': self._get_subtitles(video_id, talk_info), 'formats': formats, 'duration': float_or_none(talk_info.get('duration')), 'view_count': int_or_none(data.get('viewed_count')), 'comment_count': int_or_none( try_get(data, lambda x: x['comments']['count'])), 'tags': try_get(talk_info, lambda x: x['tags'], list), } def _get_subtitles(self, video_id, talk_info): sub_lang_list = {} for language in try_get( talk_info, (lambda x: x['downloads']['languages'], lambda x: x['languages']), list): lang_code = language.get('languageCode') or language.get('ianaCode') if not lang_code: continue sub_lang_list[lang_code] = [ { 'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, lang_code, ext), 'ext': ext, } for ext in ['ted', 'srt'] ] return sub_lang_list def _watch_info(self, url, name): webpage = self._download_webpage(url, name) config_json = self._html_search_regex( r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>', webpage, 'config', default=None) if not config_json: embed_url = self._search_regex( r"<iframe[^>]+class='pages-video-embed__video__object'[^>]+src='([^']+)'", webpage, 'embed url') return self.url_result(self._proto_relative_url(embed_url)) config = json.loads(config_json)['config'] video_url = config['video']['url'] thumbnail = config.get('image', {}).get('url') title = self._html_search_regex( r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title') description = self._html_search_regex( [ r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>', r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>', ], webpage, 'description', fatal=False) return { 'id': name, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'description': description, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tele13.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( js_to_json, qualities, determine_ext, ) class Tele13IE(InfoExtractor): _VALID_URL = r'^https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)' _TESTS = [ { 'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', 'md5': '4cb1fa38adcad8fea88487a078831755', 'info_dict': { 'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', 'ext': 'mp4', 'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda', }, 'params': { # HTTP Error 404: Not Found 'skip_download': True, }, }, { 'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok', 'md5': '867adf6a3b3fef932c68a71d70b70946', 'info_dict': { 'id': 'rOoKv2OMpOw', 'ext': 'mp4', 'title': 'Shooting star seen on 7-Sep-2015', 'description': 'md5:7292ff2a34b2f673da77da222ae77e1e', 'uploader': 'Porjai Jaturongkhakun', 'upload_date': '20150906', 'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw', }, 'add_ie': ['Youtube'], } ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) setup_js = self._search_regex( r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)", webpage, 'setup code') sources = self._parse_json(self._search_regex( r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'), display_id, js_to_json) preference = qualities(['Móvil', 'SD', 'HD']) formats = [] urls = [] for f in sources: format_url = f['file'] if format_url and format_url not in urls: ext = determine_ext(format_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif YoutubeIE.suitable(format_url): return self.url_result(format_url, 'Youtube') else: formats.append({ 'url': format_url, 'format_id': f.get('label'), 'preference': 
preference(f.get('label')), 'ext': ext, }) urls.append(format_url) self._sort_formats(formats) return { 'id': display_id, 'title': self._search_regex( r'title\s*:\s*"([^"]+)"', setup_js, 'title'), 'description': self._html_search_meta( 'description', webpage, 'description'), 'thumbnail': self._search_regex( r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tele5.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .nexx import NexxIE from ..compat import compat_urlparse class Tele5IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.tele5.de/mediathek/filme-online/videos?vid=1549416', 'info_dict': { 'id': '1549416', 'ext': 'mp4', 'upload_date': '20180814', 'timestamp': 1534290623, 'title': 'Pandorum', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.tele5.de/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191', 'only_matching': True, }, { 'url': 'https://www.tele5.de/video-clip/?ve_id=1609440', 'only_matching': True, }, { 'url': 'https://www.tele5.de/filme/schlefaz-dragon-crusaders/', 'only_matching': True, }, { 'url': 'https://www.tele5.de/filme/making-of/avengers-endgame/', 'only_matching': True, }, { 'url': 'https://www.tele5.de/star-trek/raumschiff-voyager/ganze-folge/das-vinculum/', 'only_matching': True, }, { 'url': 'https://www.tele5.de/anders-ist-sevda/', 'only_matching': True, }] def _real_extract(self, url): qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) video_id = (qs.get('vid') or qs.get('ve_id') or [None])[0] if not video_id: display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)', r'\s+id\s*=\s*["\']player_(\d{6,})', r'\bdata-id\s*=\s*["\'](\d{6,})'), webpage, 'video id') return self.url_result( 'https://api.nexx.cloud/v3/759/videos/byid/%s' % video_id, ie=NexxIE.ie_key(), video_id=video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/telebruxelles.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class TeleBruxellesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(?:[^/]+/)*(?P<id>[^/#?]+)' _TESTS = [{ 'url': 'http://bx1.be/news/que-risque-lauteur-dune-fausse-alerte-a-la-bombe/', 'md5': 'a2a67a5b1c3e8c9d33109b902f474fd9', 'info_dict': { 'id': '158856', 'display_id': 'que-risque-lauteur-dune-fausse-alerte-a-la-bombe', 'ext': 'mp4', 'title': 'Que risque l’auteur d’une fausse alerte à la bombe ?', 'description': 'md5:3cf8df235d44ebc5426373050840e466', }, }, { 'url': 'http://bx1.be/sport/futsal-schaerbeek-sincline-5-3-a-thulin/', 'md5': 'dfe07ecc9c153ceba8582ac912687675', 'info_dict': { 'id': '158433', 'display_id': 'futsal-schaerbeek-sincline-5-3-a-thulin', 'ext': 'mp4', 'title': 'Futsal : Schaerbeek s’incline 5-3 à Thulin', 'description': 'md5:fd013f1488d5e2dceb9cebe39e2d569b', }, }, { 'url': 'http://bx1.be/emission/bxenf1-gastronomie/', 'only_matching': True, }, { 'url': 'https://bx1.be/berchem-sainte-agathe/personnel-carrefour-de-berchem-sainte-agathe-inquiet/', 'only_matching': True, }, { 'url': 'https://bx1.be/dernier-jt/', 'only_matching': True, }, { # live stream 'url': 'https://bx1.be/lives/direct-tv/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) article_id = self._html_search_regex( r'<article[^>]+\bid=["\']post-(\d+)', webpage, 'article ID', default=None) title = self._html_search_regex( r'<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None) or self._og_search_title(webpage) description = self._og_search_description(webpage, default=None) rtmp_url = self._html_search_regex( r'file["\']?\s*:\s*"(r(?:tm|mt)ps?://[^/]+/(?:vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*"\.mp4|stream/live))"', webpage, 'RTMP url') # Yes, they have a typo in scheme name for live stream URLs (e.g. 
# https://bx1.be/lives/direct-tv/) rtmp_url = re.sub(r'^rmtp', 'rtmp', rtmp_url) rtmp_url = re.sub(r'"\s*\+\s*"', '', rtmp_url) formats = self._extract_wowza_formats(rtmp_url, article_id or display_id) self._sort_formats(formats) is_live = 'stream/live' in rtmp_url return { 'id': article_id or display_id, 'display_id': display_id, 'title': self._live_title(title) if is_live else title, 'description': description, 'formats': formats, 'is_live': is_live, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/telecinco.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from .ooyala import OoyalaIE from ..utils import ( clean_html, determine_ext, int_or_none, str_or_none, urljoin, ) class TelecincoIE(InfoExtractor): IE_DESC = 'telecinco.es, cuatro.com and mediaset.es' _VALID_URL = r'https?://(?:www\.)?(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html' _TESTS = [{ 'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html', 'info_dict': { 'id': '1876350223', 'title': 'Bacalao con kokotxas al pil-pil', 'description': 'md5:1382dacd32dd4592d478cbdca458e5bb', }, 'playlist': [{ 'md5': 'adb28c37238b675dad0f042292f209a7', 'info_dict': { 'id': 'JEA5ijCnF6p5W08A1rNKn7', 'ext': 'mp4', 'title': 'Con Martín Berasategui, hacer un bacalao al pil-pil es fácil y divertido', 'duration': 662, }, }] }, { 'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html', 'md5': '9468140ebc300fbb8b9d65dc6e5c4b43', 'info_dict': { 'id': 'jn24Od1zGLG4XUZcnUnZB6', 'ext': 'mp4', 'title': '¿Quién es este ex futbolista con el que hablan Leo Messi y Luis Suárez?', 'description': 'md5:a62ecb5f1934fc787107d7b9a2262805', 'duration': 79, }, }, { 'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html', 'md5': 'ae2dc6b7b50b2392076a51c0f70e01f6', 'info_dict': { 'id': 'aywerkD2Sv1vGNqq9b85Q2', 'ext': 'mp4', 'title': '#DOYLACARA. 
Con la trata no hay trato', 'description': 'md5:2771356ff7bfad9179c5f5cd954f1477', 'duration': 50, }, }, { 'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html', 'only_matching': True, }, { 'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html', 'only_matching': True, }, { # ooyala video 'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html', 'only_matching': True, }] def _parse_content(self, content, url): video_id = content['dataMediaId'] if content.get('dataCmsId') == 'ooyala': return self.url_result( 'ooyala:%s' % video_id, OoyalaIE.ie_key(), video_id) config_url = urljoin(url, content['dataConfig']) config = self._download_json( config_url, video_id, 'Downloading config JSON') title = config['info']['title'] def mmc_url(mmc_type): return re.sub( r'/(?:flash|html5)\.json', '/%s.json' % mmc_type, config['services']['mmc']) duration = None formats = [] for mmc_type in ('flash', 'html5'): mmc = self._download_json( mmc_url(mmc_type), video_id, 'Downloading %s mmc JSON' % mmc_type, fatal=False) if not mmc: continue if not duration: duration = int_or_none(mmc.get('duration')) for location in mmc['locations']: gat = self._proto_relative_url(location.get('gat'), 'http:') gcp = location.get('gcp') ogn = location.get('ogn') if None in (gat, gcp, ogn): continue token_data = { 'gcp': gcp, 'ogn': ogn, 'sta': 0, } media = self._download_json( gat, video_id, data=json.dumps(token_data).encode('utf-8'), headers={ 'Content-Type': 'application/json;charset=utf-8', 'Referer': url, }, fatal=False) or {} stream = media.get('stream') or media.get('file') if not stream: continue ext = determine_ext(stream) if ext == 'f4m': formats.extend(self._extract_f4m_formats( stream + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18', video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': 
formats.extend(self._extract_m3u8_formats( stream, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': content.get('dataPoster') or config.get('poster', {}).get('imageUrl'), 'duration': duration, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) article = self._parse_json(self._search_regex( r'window\.\$REACTBASE_STATE\.article\s*=\s*({.+})', webpage, 'article'), display_id)['article'] title = article.get('title') description = clean_html(article.get('leadParagraph')) if article.get('editorialType') != 'VID': entries = [] for p in article.get('body', []): content = p.get('content') if p.get('type') != 'video' or not content: continue entries.append(self._parse_content(content, url)) return self.playlist_result( entries, str_or_none(article.get('id')), title, description) content = article['opening']['content'] info = self._parse_content(content, url) info.update({ 'description': description, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/telegraaf.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, remove_end, ) class TelegraafIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?telegraaf\.nl/tv/(?:[^/]+/)+(?P<id>\d+)/[^/]+\.html' _TEST = { 'url': 'http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html', 'info_dict': { 'id': '24353229', 'ext': 'mp4', 'title': 'Tikibad ontruimd wegens brand', 'description': 'md5:05ca046ff47b931f9b04855015e163a4', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 33, }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_url = self._html_search_regex( r'<iframe[^>]+src="([^"]+")', webpage, 'player URL') player_page = self._download_webpage( player_url, video_id, note='Download player webpage') playlist_url = self._search_regex( r'playlist\s*:\s*"([^"]+)"', player_page, 'playlist URL') playlist_data = self._download_json(playlist_url, video_id) item = playlist_data['items'][0] formats = [] locations = item['locations'] for location in locations.get('adaptive', []): manifest_url = location['src'] ext = determine_ext(manifest_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( manifest_url, video_id, ext='mp4', m3u8_id='hls', fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( manifest_url, video_id, mpd_id='dash', fatal=False)) else: self.report_warning('Unknown adaptive format %s' % ext) for location in locations.get('progressive', []): formats.append({ 'url': location['sources'][0]['src'], 'width': location.get('width'), 'height': location.get('height'), 'format_id': 'http-%s' % location['label'], }) self._sort_formats(formats) title = remove_end(self._og_search_title(webpage), ' - VIDEO') description = self._og_search_description(webpage) duration = item.get('duration') thumbnail = item.get('poster') return { 
'id': video_id, 'title': title, 'description': description, 'formats': formats, 'duration': duration, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/telemb.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import remove_start class TeleMBIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html' _TESTS = [ { 'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html', 'md5': 'f45ea69878516ba039835794e0f8f783', 'info_dict': { 'id': '13466', 'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-', 'ext': 'mp4', 'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages', 'description': 'md5:bc5225f47b17c309761c856ad4776265', 'thumbnail': r're:^http://.*\.(?:jpg|png)$', } }, { # non-ASCII characters in download URL 'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html', 'md5': '6e9682736e5ccd4eab7f21e855350733', 'info_dict': { 'id': '13514', 'display_id': 'les-reportages-havre-incendie-mortel', 'ext': 'mp4', 'title': 'Havré - Incendie mortel - Les reportages', 'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a', 'thumbnail': r're:^http://.*\.(?:jpg|png)$', } }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) formats = [] for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage): fmt = { 'url': video_url, 'format_id': video_url.split(':')[0] } rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url) if rtmp: fmt.update({ 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf', 'page_url': 'http://www.telemb.be', 'preference': -1, }) formats.append(fmt) self._sort_formats(formats) title = remove_start(self._og_search_title(webpage), 'TéléMB : ') description = self._html_search_regex( r'<meta property="og:description" content="(.+?)" />', webpage, 'description', 
fatal=False) thumbnail = self._og_search_thumbnail(webpage) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/telequebec.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, smuggle_url, try_get, unified_timestamp, ) class TeleQuebecBaseIE(InfoExtractor): @staticmethod def _limelight_result(media_id): return { '_type': 'url_transparent', 'url': smuggle_url( 'limelight:media:' + media_id, {'geo_countries': ['CA']}), 'ie_key': 'LimelightMedia', } class TeleQuebecIE(TeleQuebecBaseIE): _VALID_URL = r'''(?x) https?:// (?: zonevideo\.telequebec\.tv/media| coucou\.telequebec\.tv/videos )/(?P<id>\d+) ''' _TESTS = [{ # available till 01.01.2023 'url': 'http://zonevideo.telequebec.tv/media/37578/un-petit-choc-et-puis-repart/un-chef-a-la-cabane', 'info_dict': { 'id': '577116881b4b439084e6b1cf4ef8b1b3', 'ext': 'mp4', 'title': 'Un petit choc et puis repart!', 'description': 'md5:b04a7e6b3f74e32d7b294cffe8658374', 'upload_date': '20180222', 'timestamp': 1519326631, }, 'params': { 'skip_download': True, }, }, { # no description 'url': 'http://zonevideo.telequebec.tv/media/30261', 'only_matching': True, }, { 'url': 'https://coucou.telequebec.tv/videos/41788/idee-de-genie/l-heure-du-bain', 'only_matching': True, }] def _real_extract(self, url): media_id = self._match_id(url) media_data = self._download_json( 'https://mnmedias.api.telequebec.tv/api/v2/media/' + media_id, media_id)['media'] info = self._limelight_result(media_data['streamInfo']['sourceId']) info.update({ 'title': media_data.get('title'), 'description': try_get( media_data, lambda x: x['descriptions'][0]['text'], compat_str), 'duration': int_or_none( media_data.get('durationInMilliseconds'), 1000), }) return info class TeleQuebecSquatIE(InfoExtractor): _VALID_URL = r'https://squat\.telequebec\.tv/videos/(?P<id>\d+)' _TESTS = [{ 'url': 'https://squat.telequebec.tv/videos/9314', 'info_dict': { 'id': 'd59ae78112d542e793d83cc9d3a5b530', 'ext': 'mp4', 'title': 'Poupeflekta', 'description': 'md5:2f0718f8d2f8fece1646ee25fb7bce75', 
'duration': 1351, 'timestamp': 1569057600, 'upload_date': '20190921', 'series': 'Miraculous : Les Aventures de Ladybug et Chat Noir', 'season': 'Saison 3', 'season_number': 3, 'episode_number': 57, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://squat.api.telequebec.tv/v1/videos/%s' % video_id, video_id) media_id = video['sourceId'] return { '_type': 'url_transparent', 'url': 'http://zonevideo.telequebec.tv/media/%s' % media_id, 'ie_key': TeleQuebecIE.ie_key(), 'id': media_id, 'title': video.get('titre'), 'description': video.get('description'), 'timestamp': unified_timestamp(video.get('datePublication')), 'series': video.get('container'), 'season': video.get('saison'), 'season_number': int_or_none(video.get('noSaison')), 'episode_number': int_or_none(video.get('episode')), } class TeleQuebecEmissionIE(TeleQuebecBaseIE): _VALID_URL = r'''(?x) https?:// (?: [^/]+\.telequebec\.tv/emissions/| (?:www\.)?telequebec\.tv/ ) (?P<id>[^?#&]+) ''' _TESTS = [{ 'url': 'http://lindicemcsween.telequebec.tv/emissions/100430013/des-soins-esthetiques-a-377-d-interets-annuels-ca-vous-tente', 'info_dict': { 'id': '66648a6aef914fe3badda25e81a4d50a', 'ext': 'mp4', 'title': "Des soins esthétiques à 377 % d'intérêts annuels, ça vous tente?", 'description': 'md5:369e0d55d0083f1fc9b71ffb640ea014', 'upload_date': '20171024', 'timestamp': 1508862118, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://bancpublic.telequebec.tv/emissions/emission-49/31986/jeunes-meres-sous-pression', 'only_matching': True, }, { 'url': 'http://www.telequebec.tv/masha-et-michka/epi059masha-et-michka-3-053-078', 'only_matching': True, }, { 'url': 'http://www.telequebec.tv/documentaire/bebes-sur-mesure/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) media_id = self._search_regex( 
r'mediaUID\s*:\s*["\'][Ll]imelight_(?P<id>[a-z0-9]{32})', webpage, 'limelight id') info = self._limelight_result(media_id) info.update({ 'title': self._og_search_title(webpage, default=None), 'description': self._og_search_description(webpage, default=None), }) return info class TeleQuebecLiveIE(InfoExtractor): _VALID_URL = r'https?://zonevideo\.telequebec\.tv/(?P<id>endirect)' _TEST = { 'url': 'http://zonevideo.telequebec.tv/endirect/', 'info_dict': { 'id': 'endirect', 'ext': 'mp4', 'title': 're:^Télé-Québec - En direct [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) m3u8_url = None webpage = self._download_webpage( 'https://player.telequebec.tv/Tq_VideoPlayer.js', video_id, fatal=False) if webpage: m3u8_url = self._search_regex( r'm3U8Url\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'm3u8 url', default=None, group='url') if not m3u8_url: m3u8_url = 'https://teleqmmd.mmdlive.lldns.net/teleqmmd/f386e3b206814e1f8c8c1c71c0f8e748/manifest.m3u8' formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', m3u8_id='hls') self._sort_formats(formats) return { 'id': video_id, 'title': self._live_title('Télé-Québec - En direct'), 'is_live': True, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/teletask.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import unified_strdate class TeleTaskIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tele-task\.de/archive/video/html5/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.tele-task.de/archive/video/html5/26168/', 'info_dict': { 'id': '26168', 'title': 'Duplicate Detection', }, 'playlist': [{ 'md5': '290ef69fb2792e481169c3958dbfbd57', 'info_dict': { 'id': '26168-speaker', 'ext': 'mp4', 'title': 'Duplicate Detection', 'upload_date': '20141218', } }, { 'md5': 'e1e7218c5f0e4790015a437fcf6c71b4', 'info_dict': { 'id': '26168-slides', 'ext': 'mp4', 'title': 'Duplicate Detection', 'upload_date': '20141218', } }] } def _real_extract(self, url): lecture_id = self._match_id(url) webpage = self._download_webpage(url, lecture_id) title = self._html_search_regex( r'itemprop="name">([^<]+)</a>', webpage, 'title') upload_date = unified_strdate(self._html_search_regex( r'Date:</td><td>([^<]+)</td>', webpage, 'date', fatal=False)) entries = [{ 'id': '%s-%s' % (lecture_id, format_id), 'url': video_url, 'title': title, 'upload_date': upload_date, } for format_id, video_url in re.findall( r'<video class="([^"]+)"[^>]*>\s*<source src="([^"]+)"', webpage)] return self.playlist_result(entries, lecture_id, title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/telewebion.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class TelewebionIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?telewebion\.com/#!/episode/(?P<id>\d+)' _TEST = { 'url': 'http://www.telewebion.com/#!/episode/1263668/', 'info_dict': { 'id': '1263668', 'ext': 'mp4', 'title': 'قرعه\u200cکشی لیگ قهرمانان اروپا', 'thumbnail': r're:^https?://.*\.jpg', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) secure_token = self._download_webpage( 'http://m.s2.telewebion.com/op/op?action=getSecurityToken', video_id) episode_details = self._download_json( 'http://m.s2.telewebion.com/op/op', video_id, query={'action': 'getEpisodeDetails', 'episode_id': video_id}) m3u8_url = 'http://m.s1.telewebion.com/smil/%s.m3u8?filepath=%s&m3u8=1&secure_token=%s' % ( video_id, episode_details['file_path'], secure_token) formats = self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', m3u8_id='hls') picture_paths = [ episode_details.get('picture_path'), episode_details.get('large_picture_path'), ] thumbnails = [{ 'url': picture_path, 'preference': idx, } for idx, picture_path in enumerate(picture_paths) if picture_path is not None] return { 'id': video_id, 'title': episode_details['title'], 'formats': formats, 'thumbnails': thumbnails, 'view_count': episode_details.get('view_count'), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tennistv.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( ExtractorError, unified_timestamp, ) class TennisTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tennistv\.com/videos/(?P<id>[-a-z0-9]+)' _TEST = { 'url': 'https://www.tennistv.com/videos/indian-wells-2018-verdasco-fritz', 'info_dict': { 'id': 'indian-wells-2018-verdasco-fritz', 'ext': 'mp4', 'title': 'Fernando Verdasco v Taylor Fritz', 'description': 're:^After his stunning victory.{174}$', 'thumbnail': 'https://atp-prod.akamaized.net/api/images/v1/images/112831/landscape/1242/0', 'timestamp': 1521017381, 'upload_date': '20180314', }, 'params': { 'skip_download': True, }, 'skip': 'Requires email and password of a subscribed account', } _NETRC_MACHINE = 'tennistv' def _login(self): username, password = self._get_login_info() if not username or not password: raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True) login_form = { 'Email': username, 'Password': password, } login_json = json.dumps(login_form).encode('utf-8') headers = { 'content-type': 'application/json', 'Referer': 'https://www.tennistv.com/login', 'Origin': 'https://www.tennistv.com', } login_result = self._download_json( 'https://www.tennistv.com/api/users/v1/login', None, note='Logging in', errnote='Login failed (wrong password?)', headers=headers, data=login_json) if login_result['error']['errorCode']: raise ExtractorError('Login failed, %s said: %r' % (self.IE_NAME, login_result['error']['errorMessage'])) if login_result['entitlement'] != 'SUBSCRIBED': self.report_warning('%s may not be subscribed to %s.' 
% (username, self.IE_NAME)) self._session_token = login_result['sessionToken'] def _real_initialize(self): self._login() def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) internal_id = self._search_regex(r'video=([0-9]+)', webpage, 'internal video id') headers = { 'Origin': 'https://www.tennistv.com', 'authorization': 'ATP %s' % self._session_token, 'content-type': 'application/json', 'Referer': url, } check_data = { 'videoID': internal_id, 'VideoUrlType': 'HLSV3', } check_json = json.dumps(check_data).encode('utf-8') check_result = self._download_json( 'https://www.tennistv.com/api/users/v1/entitlementchecknondiva', video_id, note='Checking video authorization', headers=headers, data=check_json) formats = self._extract_m3u8_formats(check_result['contentUrl'], video_id, ext='mp4') vdata_url = 'https://www.tennistv.com/api/channels/v1/de/none/video/%s' % video_id vdata = self._download_json(vdata_url, video_id) timestamp = unified_timestamp(vdata['timestamp']) thumbnail = vdata['video']['thumbnailUrl'] description = vdata['displayText']['description'] title = vdata['video']['title'] series = vdata['tour'] venue = vdata['displayText']['venue'] round_str = vdata['seo']['round'] return { 'id': video_id, 'title': title, 'description': description, 'formats': formats, 'thumbnail': thumbnail, 'timestamp': timestamp, 'series': series, 'season': venue, 'episode': round_str, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tenplay.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( parse_age_limit, parse_iso8601, smuggle_url, ) class TenPlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/[^/]+/episodes/[^/]+/[^/]+/(?P<id>tpv\d{6}[a-z]{5})' _TEST = { 'url': 'https://10play.com.au/masterchef/episodes/season-1/masterchef-s1-ep-1/tpv190718kwzga', 'info_dict': { 'id': '6060533435001', 'ext': 'mp4', 'title': 'MasterChef - S1 Ep. 1', 'description': 'md5:4fe7b78e28af8f2d900cd20d900ef95c', 'age_limit': 10, 'timestamp': 1240828200, 'upload_date': '20090427', 'uploader_id': '2199827728001', }, 'params': { 'format': 'bestvideo', 'skip_download': True, } } BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/2199827728001/cN6vRtRQt_default/index.html?videoId=%s' def _real_extract(self, url): content_id = self._match_id(url) data = self._download_json( 'https://10play.com.au/api/video/' + content_id, content_id) video = data.get('video') or {} metadata = data.get('metaData') or {} brightcove_id = video.get('videoId') or metadata['showContentVideoId'] brightcove_url = smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': ['AU']}) return { '_type': 'url_transparent', 'url': brightcove_url, 'id': content_id, 'title': video.get('title') or metadata.get('pageContentName') or metadata.get('showContentName'), 'description': video.get('description'), 'age_limit': parse_age_limit(video.get('showRatingClassification') or metadata.get('showProgramClassification')), 'series': metadata.get('showName'), 'season': metadata.get('showContentSeason'), 'timestamp': parse_iso8601(metadata.get('contentPublishDate') or metadata.get('pageContentPublishDate')), 'ie_key': 'BrightcoveNew', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/testurl.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class TestURLIE(InfoExtractor): """ Allows addressing of the test cases as test:yout.*be_1 """ IE_DESC = False # Do not list _VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$' def _real_extract(self, url): from ..extractor import gen_extractors mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') extractor_id = mobj.group('extractor') all_extractors = gen_extractors() rex = re.compile(extractor_id, flags=re.IGNORECASE) matching_extractors = [ e for e in all_extractors if rex.search(e.IE_NAME)] if len(matching_extractors) == 0: raise ExtractorError( 'No extractors matching %r found' % extractor_id, expected=True) elif len(matching_extractors) > 1: # Is it obvious which one to pick? try: extractor = next( ie for ie in matching_extractors if ie.IE_NAME.lower() == extractor_id.lower()) except StopIteration: raise ExtractorError( ('Found multiple matching extractors: %s' % ' '.join(ie.IE_NAME for ie in matching_extractors)), expected=True) else: extractor = matching_extractors[0] num_str = mobj.group('num') num = int(num_str) if num_str else 0 testcases = [] t = getattr(extractor, '_TEST', None) if t: testcases.append(t) testcases.extend(getattr(extractor, '_TESTS', [])) try: tc = testcases[num] except IndexError: raise ExtractorError( ('Test case %d not found, got only %d tests' % (num, len(testcases))), expected=True) self.to_screen('Test URL: %s' % tc['url']) return self.url_result(tc['url'], video_id=video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tf1.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str class TF1IE(InfoExtractor): """TF1 uses the wat.tv player.""" _VALID_URL = r'https?://(?:(?:videos|www|lci)\.tf1|(?:www\.)?(?:tfou|ushuaiatv|histoire|tvbreizh))\.fr/(?:[^/]+/)*(?P<id>[^/?#.]+)' _TESTS = [{ 'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html', 'info_dict': { 'id': '10635995', 'ext': 'mp4', 'title': 'Citroën Grand C4 Picasso 2013 : présentation officielle', 'description': 'Vidéo officielle du nouveau Citroën Grand C4 Picasso, lancé à l\'automne 2013.', }, 'params': { # Sometimes wat serves the whole file with the --test option 'skip_download': True, }, 'expected_warnings': ['HTTP Error 404'], }, { 'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html', 'info_dict': { 'id': 'le-grand-mysterioso-chuggington-7085291-739', 'ext': 'mp4', 'title': 'Le grand Mystérioso - Chuggington', 'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.', 'upload_date': '20150103', }, 'params': { # Sometimes wat serves the whole file with the --test option 'skip_download': True, }, 'skip': 'HTTP Error 410: Gone', }, { 'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html', 'only_matching': True, }, { 'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html', 'only_matching': True, }, { 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html', 'only_matching': True, }, { 'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html', 'info_dict': { 'id': '13641379', 'ext': 'mp4', 'title': 'md5:f392bc52245dc5ad43771650c96fb620', 'description': 'md5:44bc54f0a21322f5b91d68e76a544eae', 'upload_date': '20190611', }, 'params': { # Sometimes wat serves the whole file with the --test option 
'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) wat_id = None data = self._parse_json( self._search_regex( r'__APOLLO_STATE__\s*=\s*({.+?})\s*(?:;|</script>)', webpage, 'data', default='{}'), video_id, fatal=False) if data: try: wat_id = next( video.get('streamId') for key, video in data.items() if isinstance(video, dict) and video.get('slug') == video_id) if not isinstance(wat_id, compat_str) or not wat_id.isdigit(): wat_id = None except StopIteration: pass if not wat_id: wat_id = self._html_search_regex( (r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1', r'(["\']?)streamId\1\s*:\s*(["\']?)(?P<id>\d+)\2'), webpage, 'wat id', group='id') return self.url_result('wat:%s' % wat_id, 'Wat')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tfo.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( HEADRequest, ExtractorError, int_or_none, clean_html, ) class TFOIE(InfoExtractor): _GEO_COUNTRIES = ['CA'] _VALID_URL = r'https?://(?:www\.)?tfo\.org/(?:en|fr)/(?:[^/]+/){2}(?P<id>\d+)' _TEST = { 'url': 'http://www.tfo.org/en/universe/tfo-247/100463871/video-game-hackathon', 'md5': '47c987d0515561114cf03d1226a9d4c7', 'info_dict': { 'id': '100463871', 'ext': 'mp4', 'title': 'Video Game Hackathon', 'description': 'md5:558afeba217c6c8d96c60e5421795c07', 'upload_date': '20160212', 'timestamp': 1455310233, } } def _real_extract(self, url): video_id = self._match_id(url) self._request_webpage(HEADRequest('http://www.tfo.org/'), video_id) infos = self._download_json( 'http://www.tfo.org/api/web/video/get_infos', video_id, data=json.dumps({ 'product_id': video_id, }).encode(), headers={ 'X-tfo-session': self._get_cookies('http://www.tfo.org/')['tfo-session'].value, }) if infos.get('success') == 0: if infos.get('code') == 'ErrGeoBlocked': self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(infos['msg'])), expected=True) video_data = infos['data'] return { '_type': 'url_transparent', 'id': video_id, 'url': 'limelight:media:' + video_data['llid'], 'title': video_data['title'], 'description': video_data.get('description'), 'series': video_data.get('collection'), 'season_number': int_or_none(video_data.get('season')), 'episode_number': int_or_none(video_data.get('episode')), 'duration': int_or_none(video_data.get('duration')), 'ie_key': 'LimelightMedia', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/theintercept.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( parse_iso8601, int_or_none, ExtractorError, ) class TheInterceptIE(InfoExtractor): _VALID_URL = r'https?://theintercept\.com/fieldofvision/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://theintercept.com/fieldofvision/thisisacoup-episode-four-surrender-or-die/', 'md5': '145f28b41d44aab2f87c0a4ac8ec95bd', 'info_dict': { 'id': '46214', 'ext': 'mp4', 'title': '#ThisIsACoup – Episode Four: Surrender or Die', 'description': 'md5:74dd27f0e2fbd50817829f97eaa33140', 'timestamp': 1450429239, 'upload_date': '20151218', 'comment_count': int, } }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) json_data = self._parse_json(self._search_regex( r'initialStoreTree\s*=\s*(?P<json_data>{.+})', webpage, 'initialStoreTree'), display_id) for post in json_data['resources']['posts'].values(): if post['slug'] == display_id: return { '_type': 'url_transparent', 'url': 'jwplatform:%s' % post['fov_videoid'], 'id': compat_str(post['ID']), 'display_id': display_id, 'title': post['title'], 'description': post.get('excerpt'), 'timestamp': parse_iso8601(post.get('date')), 'comment_count': int_or_none(post.get('comments_number')), } raise ExtractorError('Unable to find the current post')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/theplatform.py
# coding: utf-8 from __future__ import unicode_literals import re import time import hmac import binascii import hashlib from .once import OnceIE from .adobepass import AdobePassIE from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, ExtractorError, float_or_none, int_or_none, sanitized_Request, unsmuggle_url, update_url_query, xpath_with_ns, mimetype2ext, find_xpath_attr, ) default_ns = 'http://www.w3.org/2005/SMIL21/Language' _x = lambda p: xpath_with_ns(p, {'smil': default_ns}) class ThePlatformBaseIE(OnceIE): _TP_TLD = 'com' def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'): meta = self._download_xml( smil_url, video_id, note=note, query={'format': 'SMIL'}, headers=self.geo_verification_headers()) error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src') if error_element is not None: exception = find_xpath_attr( error_element, _x('.//smil:param'), 'name', 'exception') if exception is not None: if exception.get('value') == 'GeoLocationBlocked': self.raise_geo_restricted(error_element.attrib['abstract']) elif error_element.attrib['src'].startswith( 'http://link.theplatform.%s/s/errorFiles/Unavailable.' 
% self._TP_TLD): raise ExtractorError( error_element.attrib['abstract'], expected=True) smil_formats = self._parse_smil_formats( meta, smil_url, video_id, namespace=default_ns, # the parameters are from syfy.com, other sites may use others, # they also work for nbc.com f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'}, transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src)) formats = [] for _format in smil_formats: if OnceIE.suitable(_format['url']): formats.extend(self._extract_once_formats(_format['url'])) else: media_url = _format['url'] if determine_ext(media_url) == 'm3u8': hdnea2 = self._get_cookies(media_url).get('hdnea2') if hdnea2: _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value}) formats.append(_format) subtitles = self._parse_smil_subtitles(meta, default_ns) return formats, subtitles def _download_theplatform_metadata(self, path, video_id): info_url = 'http://link.theplatform.%s/s/%s?format=preview' % (self._TP_TLD, path) return self._download_json(info_url, video_id) def _parse_theplatform_metadata(self, info): subtitles = {} captions = info.get('captions') if isinstance(captions, list): for caption in captions: lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type') subtitles.setdefault(lang, []).append({ 'ext': mimetype2ext(mime), 'url': src, }) duration = info.get('duration') tp_chapters = info.get('chapters', []) chapters = [] if tp_chapters: def _add_chapter(start_time, end_time): start_time = float_or_none(start_time, 1000) end_time = float_or_none(end_time, 1000) if start_time is None or end_time is None: return chapters.append({ 'start_time': start_time, 'end_time': end_time, }) for chapter in tp_chapters[:-1]: _add_chapter(chapter.get('startTime'), chapter.get('endTime')) _add_chapter(tp_chapters[-1].get('startTime'), tp_chapters[-1].get('endTime') or duration) return { 'title': info['title'], 'subtitles': subtitles, 'description': info['description'], 'thumbnail': 
info['defaultThumbnailUrl'], 'duration': float_or_none(duration, 1000), 'timestamp': int_or_none(info.get('pubDate'), 1000) or None, 'uploader': info.get('billingCode'), 'chapters': chapters, } def _extract_theplatform_metadata(self, path, video_id): info = self._download_theplatform_metadata(path, video_id) return self._parse_theplatform_metadata(info) class ThePlatformIE(ThePlatformBaseIE, AdobePassIE): _VALID_URL = r'''(?x) (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/ (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))? |theplatform:)(?P<id>[^/\?&]+)''' _TESTS = [{ # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/ 'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true', 'info_dict': { 'id': 'e9I_cZgTgIPd', 'ext': 'flv', 'title': 'Blackberry\'s big, bold Z30', 'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.', 'duration': 247, 'timestamp': 1383239700, 'upload_date': '20131031', 'uploader': 'CBSI-NEW', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': '404 Not Found', }, { # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/ 'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT', 'info_dict': { 'id': '22d_qsQ6MIRT', 'ext': 'flv', 'description': 'md5:ac330c9258c04f9d7512cf26b9595409', 'title': 'Tesla Model S: A second step towards a cleaner motoring future', 'timestamp': 1426176191, 'upload_date': '20150312', 'uploader': 'CBSI-NEW', }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD', 'info_dict': { 'id': 'yMBg9E8KFxZD', 'ext': 'mp4', 'description': 'md5:644ad9188d655b742f942bf2e06b002d', 'title': 'HIGHLIGHTS: USA bag first ever series Cup win', 'uploader': 'EGSM', } }, { 'url': 
'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7', 'only_matching': True, }, { 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701', 'md5': 'fb96bb3d85118930a5b055783a3bd992', 'info_dict': { 'id': 'tdy_or_siri_150701', 'ext': 'mp4', 'title': 'iPhone Siri’s sassy response to a math question has people talking', 'description': 'md5:a565d1deadd5086f3331d57298ec6333', 'duration': 83.0, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1435752600, 'upload_date': '20150701', 'uploader': 'NBCU-NEWS', }, }, { # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1 # geo-restricted (US), HLS encrypted with AES-128 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781', 'only_matching': True, }] @classmethod def _extract_urls(cls, webpage): m = re.search( r'''(?x) <meta\s+ property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+ content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2 ''', webpage) if m: return [m.group('url')] # Are whitesapces ignored in URLs? 
# https://github.com/ytdl-org/youtube-dl/issues/12044 matches = re.findall( r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage) if matches: return [re.sub(r'\s', '', list(zip(*matches))[1][0])] @staticmethod def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False): flags = '10' if include_qs else '00' expiration_date = '%x' % (int(time.time()) + life) def str_to_hex(str): return binascii.b2a_hex(str.encode('ascii')).decode('ascii') def hex_to_bytes(hex): return binascii.a2b_hex(hex.encode('ascii')) relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1) clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path)) checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest() sig = flags + expiration_date + checksum + str_to_hex(sig_secret) return '%s&sig=%s' % (url, sig) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) provider_id = mobj.group('provider_id') video_id = mobj.group('id') if not provider_id: provider_id = 'dJ5BDC' path = provider_id + '/' if mobj.group('media'): path += mobj.group('media') path += video_id qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query) if 'guid' in qs_dict: webpage = self._download_webpage(url, video_id) scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage) feed_id = None # feed id usually locates in the last script. 
# Seems there's no pattern for the interested script filename, so # I try one by one for script in reversed(scripts): feed_script = self._download_webpage( self._proto_relative_url(script, 'http:'), video_id, 'Downloading feed script') feed_id = self._search_regex( r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None) if feed_id is not None: break if feed_id is None: raise ExtractorError('Unable to find feed id') return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % ( provider_id, feed_id, qs_dict['guid'][0])) if smuggled_data.get('force_smil_url', False): smil_url = url # Explicitly specified SMIL (see https://github.com/ytdl-org/youtube-dl/issues/7385) elif '/guid/' in url: headers = {} source_url = smuggled_data.get('source_url') if source_url: headers['Referer'] = source_url request = sanitized_Request(url, headers=headers) webpage = self._download_webpage(request, video_id) smil_url = self._search_regex( r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml', webpage, 'smil url', group='url') path = self._search_regex( r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path') smil_url += '?' if '?' 
not in smil_url else '&' + 'formats=m3u,mpeg4' elif mobj.group('config'): config_url = url + '&form=json' config_url = config_url.replace('swf/', 'config/') config_url = config_url.replace('onsite/', 'onsite/config/') config = self._download_json(config_url, video_id, 'Downloading config') if 'releaseUrl' in config: release_url = config['releaseUrl'] else: release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path smil_url = release_url + '&formats=MPEG4&manifest=f4m' else: smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path sig = smuggled_data.get('sig') if sig: smil_url = self._sign_url(smil_url, sig['key'], sig['secret']) formats, subtitles = self._extract_theplatform_smil(smil_url, video_id) self._sort_formats(formats) ret = self._extract_theplatform_metadata(path, video_id) combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles) ret.update({ 'id': video_id, 'formats': formats, 'subtitles': combined_subtitles, }) return ret class ThePlatformFeedIE(ThePlatformBaseIE): _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s' _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))' _TESTS = [{ # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207 'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207', 'md5': '6e32495b5073ab414471b615c5ded394', 'info_dict': { 'id': 'n_hardball_5biden_140207', 'ext': 'mp4', 'title': 'The Biden factor: will Joe run in 2016?', 'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? 
Mark Halperin and Sam Stein weigh in.', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140208', 'timestamp': 1391824260, 'duration': 467.0, 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'], 'uploader': 'NBCU-NEWS', }, }, { 'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01', 'only_matching': True, }] def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None): real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query) entry = self._download_json(real_url, video_id)['entries'][0] main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl') formats = [] subtitles = {} first_video_id = None duration = None asset_types = [] for item in entry['media$content']: smil_url = item['plfile$url'] cur_video_id = ThePlatformIE._match_id(smil_url) if first_video_id is None: first_video_id = cur_video_id duration = float_or_none(item.get('plfile$duration')) file_asset_types = item.get('plfile$assetTypes') or compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes'] for asset_type in file_asset_types: if asset_type in asset_types: continue asset_types.append(asset_type) query = { 'mbr': 'true', 'formats': item['plfile$format'], 'assetTypes': asset_type, } if asset_type in asset_types_query: query.update(asset_types_query[asset_type]) cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query( main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type) formats.extend(cur_formats) subtitles = self._merge_subtitles(subtitles, cur_subtitles) self._sort_formats(formats) thumbnails = [{ 'url': thumbnail['plfile$url'], 'width': int_or_none(thumbnail.get('plfile$width')), 'height': int_or_none(thumbnail.get('plfile$height')), } for thumbnail in 
entry.get('media$thumbnails', [])] timestamp = int_or_none(entry.get('media$availableDate'), scale=1000) categories = [item['media$name'] for item in entry.get('media$categories', [])] ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id) subtitles = self._merge_subtitles(subtitles, ret['subtitles']) ret.update({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'duration': duration, 'timestamp': timestamp, 'categories': categories, }) if custom_fields: ret.update(custom_fields(entry)) return ret def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') provider_id = mobj.group('provider_id') feed_id = mobj.group('feed_id') filter_query = mobj.group('filter') return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/thescene.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse class TheSceneIE(InfoExtractor): _VALID_URL = r'https?://thescene\.com/watch/[^/]+/(?P<id>[^/#?]+)' _TEST = { 'url': 'https://thescene.com/watch/vogue/narciso-rodriguez-spring-2013-ready-to-wear', 'info_dict': { 'id': '520e8faac2b4c00e3c6e5f43', 'ext': 'mp4', 'title': 'Narciso Rodriguez: Spring 2013 Ready-to-Wear', 'display_id': 'narciso-rodriguez-spring-2013-ready-to-wear', 'duration': 127, 'series': 'Style.com Fashion Shows', 'season': 'Ready To Wear Spring 2013', 'tags': list, 'categories': list, 'upload_date': '20120913', 'timestamp': 1347512400, 'uploader': 'vogue', }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) player_url = compat_urlparse.urljoin( url, self._html_search_regex( r'id=\'js-player-script\'[^>]+src=\'(.+?)\'', webpage, 'player url')) return { '_type': 'url_transparent', 'display_id': display_id, 'url': player_url, 'ie_key': 'CondeNast', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/thestar.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class TheStarIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thestar\.com/(?:[^/]+/)*(?P<id>.+)\.html' _TEST = { 'url': 'http://www.thestar.com/life/2016/02/01/mankind-why-this-woman-started-a-men-s-skincare-line.html', 'md5': '2c62dd4db2027e35579fefb97a8b6554', 'info_dict': { 'id': '4732393888001', 'ext': 'mp4', 'title': 'Mankind: Why this woman started a men\'s skin care line', 'description': 'Robert Cribb talks to Young Lee, the founder of Uncle Peter\'s MAN.', 'uploader_id': '794267642001', 'timestamp': 1454353482, 'upload_date': '20160201', }, 'params': { # m3u8 download 'skip_download': True, } } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/794267642001/default_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) brightcove_id = self._search_regex( r'mainartBrightcoveVideoId["\']?\s*:\s*["\']?(\d+)', webpage, 'brightcove id') return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/thesun.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import extract_attributes class TheSunIE(InfoExtractor): _VALID_URL = r'https://(?:www\.)?thesun\.co\.uk/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://www.thesun.co.uk/tvandshowbiz/2261604/orlando-bloom-and-katy-perry-post-adorable-instagram-video-together-celebrating-thanksgiving-after-split-rumours/', 'info_dict': { 'id': '2261604', 'title': 'md5:cba22f48bad9218b64d5bbe0e16afddf', }, 'playlist_count': 2, } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) entries = [] for video in re.findall( r'<video[^>]+data-video-id-pending=[^>]+>', webpage): attrs = extract_attributes(video) video_id = attrs['data-video-id-pending'] account_id = attrs.get('data-account', '5067014667001') entries.append(self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % (account_id, video_id), 'BrightcoveNew', video_id)) return self.playlist_result( entries, article_id, self._og_search_title(webpage, fatal=False))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/theweatherchannel.py
# coding: utf-8 from __future__ import unicode_literals from .theplatform import ThePlatformIE from ..utils import ( determine_ext, parse_duration, ) class TheWeatherChannelIE(ThePlatformIE): _VALID_URL = r'https?://(?:www\.)?weather\.com/(?:[^/]+/)*video/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://weather.com/series/great-outdoors/video/ice-climber-is-in-for-a-shock', 'md5': 'ab924ac9574e79689c24c6b95e957def', 'info_dict': { 'id': 'cc82397e-cc3f-4d11-9390-a785add090e8', 'ext': 'mp4', 'title': 'Ice Climber Is In For A Shock', 'description': 'md5:55606ce1378d4c72e6545e160c9d9695', 'uploader': 'TWC - Digital (No Distro)', 'uploader_id': '6ccd5455-16bb-46f2-9c57-ff858bb9f62c', } }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) drupal_settings = self._parse_json(self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'), display_id) video_id = drupal_settings['twc']['contexts']['node']['uuid'] video_data = self._download_json( 'https://dsx.weather.com/cms/v4/asset-collection/en_US/' + video_id, video_id) seo_meta = video_data.get('seometa', {}) title = video_data.get('title') or seo_meta['title'] urls = [] thumbnails = [] formats = [] for variant_id, variant_url in video_data.get('variants', []).items(): variant_url = variant_url.strip() if not variant_url or variant_url in urls: continue urls.append(variant_url) ext = determine_ext(variant_url) if ext == 'jpg': thumbnails.append({ 'url': variant_url, 'id': variant_id, }) elif ThePlatformIE.suitable(variant_url): tp_formats, _ = self._extract_theplatform_smil(variant_url, video_id) formats.extend(tp_formats) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( variant_url, video_id, 'mp4', 'm3u8_native', m3u8_id=variant_id, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( variant_url, video_id, f4m_id=variant_id, fatal=False)) else: formats.append({ 'url': variant_url, 
'format_id': variant_id, }) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': video_data.get('description') or seo_meta.get('description') or seo_meta.get('og:description'), 'duration': parse_duration(video_data.get('duration')), 'uploader': video_data.get('providername'), 'uploader_id': video_data.get('providerid'), 'thumbnails': thumbnails, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/thisamericanlife.py
from __future__ import unicode_literals from .common import InfoExtractor class ThisAmericanLifeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one', 'md5': '8f7d2da8926298fdfca2ee37764c11ce', 'info_dict': { 'id': '487', 'ext': 'm4a', 'title': '487: Harper High School, Part One', 'description': 'md5:ee40bdf3fb96174a9027f76dbecea655', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://www.thisamericanlife.org/play_full.php?play=487', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.thisamericanlife.org/radio-archives/episode/%s' % video_id, video_id) return { 'id': video_id, 'url': 'http://stream.thisamericanlife.org/{0}/stream/{0}_64k.m3u8'.format(video_id), 'protocol': 'm3u8_native', 'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none', 'abr': 64, 'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True), 'description': self._html_search_meta(r'description', webpage, 'description'), 'thumbnail': self._og_search_thumbnail(webpage), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/thisav.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import remove_end class ThisAVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*' _TESTS = [{ # jwplayer 'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html', 'md5': '0480f1ef3932d901f0e0e719f188f19b', 'info_dict': { 'id': '47734', 'ext': 'flv', 'title': '高樹マリア - Just fit', 'uploader': 'dj7970', 'uploader_id': 'dj7970' } }, { # html5 media 'url': 'http://www.thisav.com/video/242352/nerdy-18yo-big-ass-tattoos-and-glasses.html', 'md5': 'ba90c076bd0f80203679e5b60bf523ee', 'info_dict': { 'id': '242352', 'ext': 'mp4', 'title': 'Nerdy 18yo Big Ass Tattoos and Glasses', 'uploader': 'cybersluts', 'uploader_id': 'cybersluts', }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) title = remove_end(self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title'), ' - 視頻 - ThisAV.com-世界第一中文成人娛樂網站') video_url = self._html_search_regex( r"addVariable\('file','([^']+)'\);", webpage, 'video url', default=None) if video_url: info_dict = { 'formats': [{ 'url': video_url, }], } else: entries = self._parse_html5_media_entries(url, webpage, video_id) if entries: info_dict = entries[0] else: info_dict = self._extract_jwplayer_data( webpage, video_id, require_title=False) uploader = self._html_search_regex( r': <a href="http://www\.thisav\.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>', webpage, 'uploader name', fatal=False) uploader_id = self._html_search_regex( r': <a href="http://www\.thisav\.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>', webpage, 'uploader id', fatal=False) info_dict.update({ 'id': video_id, 'uploader': uploader, 'uploader_id': uploader_id, 'title': title, }) return info_dict
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/thisoldhouse.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import try_get class ThisOldHouseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode)/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench', 'md5': '568acf9ca25a639f0c4ff905826b662f', 'info_dict': { 'id': '2REGtUDQ', 'ext': 'mp4', 'title': 'How to Build a Storage Bench', 'description': 'In the workshop, Tom Silva and Kevin O\'Connor build a storage bench for an entryway.', 'timestamp': 1442548800, 'upload_date': '20150918', } }, { 'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins', 'only_matching': True, }, { 'url': 'https://www.thisoldhouse.com/tv-episode/ask-toh-shelf-rough-electric', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( (r'data-mid=(["\'])(?P<id>(?:(?!\1).)+)\1', r'id=(["\'])inline-video-player-(?P<id>(?:(?!\1).)+)\1'), webpage, 'video id', default=None, group='id') if not video_id: drupal_settings = self._parse_json(self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'), display_id) video_id = try_get( drupal_settings, lambda x: x['jwplatform']['video_id'], compat_str) or list(drupal_settings['comScore'])[0] return self.url_result('jwplatform:' + video_id, 'JWPlatform', video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/threeqsdn.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, js_to_json, mimetype2ext, ) class ThreeQSDNIE(InfoExtractor): IE_NAME = '3qsdn' IE_DESC = '3Q SDN' _VALID_URL = r'https?://playout\.3qsdn\.com/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ # ondemand from http://www.philharmonie.tv/veranstaltung/26/ 'url': 'http://playout.3qsdn.com/0280d6b9-1215-11e6-b427-0cc47a188158?protocol=http', 'md5': 'ab040e37bcfa2e0c079f92cb1dd7f6cd', 'info_dict': { 'id': '0280d6b9-1215-11e6-b427-0cc47a188158', 'ext': 'mp4', 'title': '0280d6b9-1215-11e6-b427-0cc47a188158', 'is_live': False, }, 'expected_warnings': ['Failed to download MPD manifest', 'Failed to parse JSON'], }, { # live video stream 'url': 'https://playout.3qsdn.com/d755d94b-4ab9-11e3-9162-0025907ad44f?js=true', 'info_dict': { 'id': 'd755d94b-4ab9-11e3-9162-0025907ad44f', 'ext': 'mp4', 'title': 're:^d755d94b-4ab9-11e3-9162-0025907ad44f [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { 'skip_download': True, # m3u8 downloads }, 'expected_warnings': ['Failed to download MPD manifest'], }, { # live audio stream 'url': 'http://playout.3qsdn.com/9edf36e0-6bf2-11e2-a16a-9acf09e2db48', 'only_matching': True, }, { # live audio stream with some 404 URLs 'url': 'http://playout.3qsdn.com/ac5c3186-777a-11e2-9c30-9acf09e2db48', 'only_matching': True, }, { # geo restricted with 'This content is not available in your country' 'url': 'http://playout.3qsdn.com/d63a3ffe-75e8-11e2-9c30-9acf09e2db48', 'only_matching': True, }, { # geo restricted with 'playout.3qsdn.com/forbidden' 'url': 'http://playout.3qsdn.com/8e330f26-6ae2-11e2-a16a-9acf09e2db48', 'only_matching': True, }, { # live video with rtmp link 'url': 'https://playout.3qsdn.com/6092bb9e-8f72-11e4-a173-002590c750be', 'only_matching': True, }] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+\b(?:data-)?src=(["\'])(?P<url>%s.*?)\1' % 
ThreeQSDNIE._VALID_URL, webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) js = self._download_webpage( 'http://playout.3qsdn.com/%s' % video_id, video_id, query={'js': 'true'}) if any(p in js for p in ( '>This content is not available in your country', 'playout.3qsdn.com/forbidden')): self.raise_geo_restricted() stream_content = self._search_regex( r'streamContent\s*:\s*(["\'])(?P<content>.+?)\1', js, 'stream content', default='demand', group='content') live = stream_content == 'live' stream_type = self._search_regex( r'streamType\s*:\s*(["\'])(?P<type>audio|video)\1', js, 'stream type', default='video', group='type') formats = [] urls = set() def extract_formats(item_url, item={}): if not item_url or item_url in urls: return urls.add(item_url) ext = mimetype2ext(item.get('type')) or determine_ext(item_url, default_ext=None) if ext == 'mpd': formats.extend(self._extract_mpd_formats( item_url, video_id, mpd_id='mpd', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( item_url, video_id, 'mp4', entry_protocol='m3u8' if live else 'm3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( item_url, video_id, f4m_id='hds', fatal=False)) else: if not self._is_valid_url(item_url, video_id): return formats.append({ 'url': item_url, 'format_id': item.get('quality'), 'ext': 'mp4' if item_url.startswith('rtsp') else ext, 'vcodec': 'none' if stream_type == 'audio' else None, }) for item_js in re.findall(r'({[^{]*?\b(?:src|source)\s*:\s*["\'].+?})', js): f = self._parse_json( item_js, video_id, transform_source=js_to_json, fatal=False) if not f: continue extract_formats(f.get('src'), f) # More relaxed version to collect additional URLs and acting # as a future-proof fallback for _, src in re.findall(r'\b(?:src|source)\s*:\s*(["\'])((?:https?|rtsp)://.+?)\1', js): extract_formats(src) self._sort_formats(formats) title = self._live_title(video_id) if live 
else video_id return { 'id': video_id, 'title': title, 'is_live': live, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tiktok.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( compat_str, ExtractorError, int_or_none, str_or_none, try_get, url_or_none, ) class TikTokBaseIE(InfoExtractor): def _extract_aweme(self, data): video = data['video'] description = str_or_none(try_get(data, lambda x: x['desc'])) width = int_or_none(try_get(data, lambda x: video['width'])) height = int_or_none(try_get(data, lambda x: video['height'])) format_urls = set() formats = [] for format_id in ( 'play_addr_lowbr', 'play_addr', 'play_addr_h264', 'download_addr'): for format in try_get( video, lambda x: x[format_id]['url_list'], list) or []: format_url = url_or_none(format) if not format_url: continue if format_url in format_urls: continue format_urls.add(format_url) formats.append({ 'url': format_url, 'ext': 'mp4', 'height': height, 'width': width, }) self._sort_formats(formats) thumbnail = url_or_none(try_get( video, lambda x: x['cover']['url_list'][0], compat_str)) uploader = try_get(data, lambda x: x['author']['nickname'], compat_str) timestamp = int_or_none(data.get('create_time')) comment_count = int_or_none(data.get('comment_count')) or int_or_none( try_get(data, lambda x: x['statistics']['comment_count'])) repost_count = int_or_none(try_get( data, lambda x: x['statistics']['share_count'])) aweme_id = data['aweme_id'] return { 'id': aweme_id, 'title': uploader or aweme_id, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'timestamp': timestamp, 'comment_count': comment_count, 'repost_count': repost_count, 'formats': formats, } class TikTokIE(TikTokBaseIE): _VALID_URL = r'''(?x) https?:// (?: (?:m\.)?tiktok\.com/v| (?:www\.)?tiktok\.com/share/video ) /(?P<id>\d+) ''' _TESTS = [{ 'url': 'https://m.tiktok.com/v/6606727368545406213.html', 'md5': 'd584b572e92fcd48888051f238022420', 'info_dict': { 'id': '6606727368545406213', 'ext': 'mp4', 'title': 'Zureeal', 'description': 
'#bowsette#mario#cosplay#uk#lgbt#gaming#asian#bowsettecosplay', 'thumbnail': r're:^https?://.*~noop.image', 'uploader': 'Zureeal', 'timestamp': 1538248586, 'upload_date': '20180929', 'comment_count': int, 'repost_count': int, } }, { 'url': 'https://www.tiktok.com/share/video/6606727368545406213', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://m.tiktok.com/v/%s.html' % video_id, video_id) data = self._parse_json(self._search_regex( r'\bdata\s*=\s*({.+?})\s*;', webpage, 'data'), video_id) return self._extract_aweme(data) class TikTokUserIE(TikTokBaseIE): _VALID_URL = r'''(?x) https?:// (?: (?:m\.)?tiktok\.com/h5/share/usr| (?:www\.)?tiktok\.com/share/user ) /(?P<id>\d+) ''' _TESTS = [{ 'url': 'https://m.tiktok.com/h5/share/usr/188294915489964032.html', 'info_dict': { 'id': '188294915489964032', }, 'playlist_mincount': 24, }, { 'url': 'https://www.tiktok.com/share/user/188294915489964032', 'only_matching': True, }] def _real_extract(self, url): user_id = self._match_id(url) data = self._download_json( 'https://m.tiktok.com/h5/share/usr/list/%s/' % user_id, user_id, query={'_signature': '_'}) entries = [] for aweme in data['aweme_list']: try: entry = self._extract_aweme(aweme) except ExtractorError: continue entry['extractor_key'] = TikTokIE.ie_key() entries.append(entry) return self.playlist_result(entries, user_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tinypic.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class TinyPicIE(InfoExtractor): IE_NAME = 'tinypic' IE_DESC = 'tinypic.com videos' _VALID_URL = r'https?://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+' _TESTS = [ { 'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8', 'md5': '609b74432465364e72727ebc6203f044', 'info_dict': { 'id': '6xw7tc', 'ext': 'flv', 'title': 'shadow phenomenon weird', }, }, { 'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8', 'only_matching': True, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id, 'Downloading page') mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n' r'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage) if mobj is None: raise ExtractorError('Video %s does not exist' % video_id, expected=True) file_id = mobj.group('fileid') server_id = mobj.group('serverid') KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting' keywords = self._html_search_meta('keywords', webpage, 'title') title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else '' video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id) thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id) return { 'id': file_id, 'url': video_url, 'thumbnail': thumbnail, 'title': title }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tmz.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class TMZIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://www.tmz.com/videos/0_okj015ty/', 'md5': '4d22a51ef205b6c06395d8394f72d560', 'info_dict': { 'id': '0_okj015ty', 'ext': 'mp4', 'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!', 'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie??? Or is she just showing off her amazing boobs?', 'timestamp': 1394747163, 'uploader_id': 'batchUser', 'upload_date': '20140313', } }, { 'url': 'http://www.tmz.com/videos/0-cegprt2p/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url).replace('-', '_') return self.url_result('kaltura:591531:%s' % video_id, 'Kaltura', video_id) class TMZArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tmz\.com/\d{4}/\d{2}/\d{2}/(?P<id>[^/]+)/?' _TEST = { 'url': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert', 'md5': '3316ff838ae5bb7f642537825e1e90d2', 'info_dict': { 'id': '0_6snoelag', 'ext': 'mov', 'title': 'Bobby Brown Tells Crowd ... Bobbi Kristina is Awake', 'description': 'Bobby Brown stunned his audience during a concert Saturday night, when he told the crowd, "Bobbi is awake. She\'s watching me."', 'timestamp': 1429467813, 'upload_date': '20150419', 'uploader_id': 'batchUser', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) embedded_video_info = self._parse_json(self._html_search_regex( r'tmzVideoEmbed\(({.+?})\);', webpage, 'embedded video info'), video_id) return self.url_result( 'http://www.tmz.com/videos/%s/' % embedded_video_info['id'])
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tnaflix.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    fix_xml_ampersands,
    float_or_none,
    int_or_none,
    parse_duration,
    str_to_int,
    unescapeHTML,
    xpath_text,
)


class TNAFlixNetworkBaseIE(InfoExtractor):
    """Shared extraction logic for the TNAFlix network of sites
    (tnaflix.com, empflix.com, moviefap.com and their embed players).

    Subclasses customize behavior entirely through the class attributes
    below (host name, vkey suffix and the per-site metadata regexes).
    """

    # May be overridden in descendants if necessary
    _CONFIG_REGEX = [
        r'flashvars\.config\s*=\s*escape\("(?P<url>[^"]+)"',
        r'<input[^>]+name="config\d?" value="(?P<url>[^"]+)"',
        r'config\s*=\s*(["\'])(?P<url>(?:https?:)?//(?:(?!\1).)+)\1',
    ]
    _HOST = 'tna'
    _VKEY_SUFFIX = ''
    _TITLE_REGEX = r'<input[^>]+name="title" value="([^"]+)"'
    _DESCRIPTION_REGEX = r'<input[^>]+name="description" value="([^"]+)"'
    _UPLOADER_REGEX = r'<input[^>]+name="username" value="([^"]+)"'
    # Regexes set to None disable the corresponding field for a site.
    _VIEW_COUNT_REGEX = None
    _COMMENT_COUNT_REGEX = None
    _AVERAGE_RATING_REGEX = None
    _CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'

    def _extract_thumbnails(self, flix_xml):
        """Build a thumbnail list from the <timeline> section of the
        config XML, or return None when any required element is missing.

        The XML describes a numbered image sequence: a URL pattern with a
        '#' placeholder plus first/last frame indices.
        """

        def get_child(elem, names):
            # Element names differ between sites; try each alias in order.
            for name in names:
                child = elem.find(name)
                if child is not None:
                    return child

        timeline = get_child(flix_xml, ['timeline', 'rolloverBarImage'])
        if timeline is None:
            return

        pattern_el = get_child(timeline, ['imagePattern', 'pattern'])
        if pattern_el is None or not pattern_el.text:
            return

        first_el = get_child(timeline, ['imageFirst', 'first'])
        last_el = get_child(timeline, ['imageLast', 'last'])
        if first_el is None or last_el is None:
            return

        first_text = first_el.text
        last_text = last_el.text
        if not first_text.isdigit() or not last_text.isdigit():
            return

        first = int(first_text)
        last = int(last_text)
        if first > last:
            return

        width = int_or_none(xpath_text(timeline, './imageWidth', 'thumbnail width'))
        height = int_or_none(xpath_text(timeline, './imageHeight', 'thumbnail height'))

        # Expand the '#' placeholder into one URL per frame index.
        return [{
            'url': self._proto_relative_url(pattern_el.text.replace('#', compat_str(i)), 'http:'),
            'width': width,
            'height': height,
        } for i in range(first, last + 1)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # Some site URL patterns expose one of two display-id groups;
        # fall back to the numeric video id if neither matched.
        for display_id_key in ('display_id', 'display_id_2'):
            if display_id_key in mobj.groupdict():
                display_id = mobj.group(display_id_key)
                if display_id:
                    break
        else:
            display_id = video_id

        webpage = self._download_webpage(url, display_id)

        # Preferred path: the page embeds the config URL directly.
        cfg_url = self._proto_relative_url(self._html_search_regex(
            self._CONFIG_REGEX, webpage, 'flashvars.config', default=None,
            group='url'), 'http:')

        if not cfg_url:
            # Fallback: reconstruct the CDN config URL from hidden form
            # inputs (vkey/nkey). NOTE(review): URL layout inferred from
            # the site's CDN; confirm against a live page if it breaks.
            inputs = self._hidden_inputs(webpage)
            cfg_url = ('https://cdn-fck.%sflix.com/%sflix/%s%s.fid?key=%s&VID=%s&premium=1&vip=1&alpha'
                       % (self._HOST, self._HOST, inputs['vkey'], self._VKEY_SUFFIX, inputs['nkey'], video_id))

        # The config XML is often malformed; fix_xml_ampersands repairs it
        # before parsing.
        cfg_xml = self._download_xml(
            cfg_url, display_id, 'Downloading metadata',
            transform_source=fix_xml_ampersands, headers={'Referer': url})

        formats = []

        def extract_video_url(vl):
            # Any URL modification now results in HTTP Error 403: Forbidden
            return unescapeHTML(vl.text)

        # Default (single) stream, if present.
        video_link = cfg_xml.find('./videoLink')
        if video_link is not None:
            formats.append({
                'url': extract_video_url(video_link),
                'ext': xpath_text(cfg_xml, './videoConfig/type', 'type', default='flv'),
            })

        # Additional quality variants; resolution like '720p' yields height.
        for item in cfg_xml.findall('./quality/item'):
            video_link = item.find('./videoLink')
            if video_link is None:
                continue
            res = item.find('res')
            format_id = None if res is None else res.text
            height = int_or_none(self._search_regex(
                r'^(\d+)[pP]', format_id, 'height', default=None))
            formats.append({
                'url': self._proto_relative_url(extract_video_url(video_link), 'http:'),
                'format_id': format_id,
                'height': height,
            })

        self._sort_formats(formats)

        thumbnail = self._proto_relative_url(
            xpath_text(cfg_xml, './startThumb', 'thumbnail'), 'http:')
        thumbnails = self._extract_thumbnails(cfg_xml)

        # Title: site-specific regex first, og:title as a fallback.
        title = None
        if self._TITLE_REGEX:
            title = self._html_search_regex(
                self._TITLE_REGEX, webpage, 'title', default=None)
        if not title:
            title = self._og_search_title(webpage)

        # Adult network: default to 18 unless RTA metadata says otherwise.
        age_limit = self._rta_search(webpage) or 18

        duration = parse_duration(self._html_search_meta(
            'duration', webpage, 'duration', default=None))

        def extract_field(pattern, name):
            # Optional field helper: a None pattern means "not available
            # on this site".
            return self._html_search_regex(pattern, webpage, name, default=None) if pattern else None

        description = extract_field(self._DESCRIPTION_REGEX, 'description')
        uploader = extract_field(self._UPLOADER_REGEX, 'uploader')
        view_count = str_to_int(extract_field(self._VIEW_COUNT_REGEX, 'view count'))
        comment_count = str_to_int(extract_field(self._COMMENT_COUNT_REGEX, 'comment count'))
        average_rating = float_or_none(extract_field(self._AVERAGE_RATING_REGEX, 'average rating'))

        categories_str = extract_field(self._CATEGORIES_REGEX, 'categories')
        categories = [c.strip() for c in categories_str.split(',')] if categories_str is not None else []

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'thumbnails': thumbnails,
            'duration': duration,
            'age_limit': age_limit,
            'uploader': uploader,
            'view_count': view_count,
            'comment_count': comment_count,
            'average_rating': average_rating,
            'categories': categories,
            'formats': formats,
        }


class TNAFlixNetworkEmbedIE(TNAFlixNetworkBaseIE):
    """Embed player (player.tnaflix.com / player.empflix.com)."""
    _VALID_URL = r'https?://player\.(?:tna|emp)flix\.com/video/(?P<id>\d+)'

    _TITLE_REGEX = r'<title>([^<]+)</title>'

    _TESTS = [{
        'url': 'https://player.tnaflix.com/video/6538',
        'info_dict': {
            'id': '6538',
            'display_id': '6538',
            'ext': 'mp4',
            'title': 'Educational xxx video',
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://player.empflix.com/video/33051',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find iframe embeds of the network's player in arbitrary pages.
        return [url for _, url in re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.(?:tna|emp)flix\.com/video/\d+)\1',
            webpage)]


class TNAEMPFlixBaseIE(TNAFlixNetworkBaseIE):
    """Regex overrides shared by the tnaflix.com and empflix.com sites."""
    _DESCRIPTION_REGEX = r'(?s)>Description:</[^>]+>(.+?)<'
    _UPLOADER_REGEX = r'<span>by\s*<a[^>]+\bhref=["\']/profile/[^>]+>([^<]+)<'
    _CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'


class TNAFlixIE(TNAEMPFlixBaseIE):
    _VALID_URL = r'https?://(?:www\.)?tnaflix\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'

    _TITLE_REGEX = r'<title>(.+?) - (?:TNAFlix Porn Videos|TNAFlix\.com)</title>'

    _TESTS = [{
        # anonymous uploader, no categories
        'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
        'md5': '7e569419fe6d69543d01e6be22f5f7c4',
        'info_dict': {
            'id': '553878',
            'display_id': 'Carmella-Decesare-striptease',
            'ext': 'mp4',
            'title': 'Carmella Decesare - striptease',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 91,
            'age_limit': 18,
            'categories': ['Porn Stars'],
        }
    }, {
        # non-anonymous uploader, categories
        'url': 'https://www.tnaflix.com/teen-porn/Educational-xxx-video/video6538',
        'md5': '0f5d4d490dbfd117b8607054248a07c0',
        'info_dict': {
            'id': '6538',
            'display_id': 'Educational-xxx-video',
            'ext': 'mp4',
            'title': 'Educational xxx video',
            'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 164,
            'age_limit': 18,
            'uploader': 'bobwhite39',
            'categories': list,
        }
    }, {
        'url': 'https://www.tnaflix.com/amateur-porn/bunzHD-Ms.Donk/video358632',
        'only_matching': True,
    }]


class EMPFlixIE(TNAEMPFlixBaseIE):
    _VALID_URL = r'https?://(?:www\.)?empflix\.com/(?:videos/(?P<display_id>.+?)-|[^/]+/(?P<display_id_2>[^/]+)/video)(?P<id>[0-9]+)'

    _HOST = 'emp'
    _VKEY_SUFFIX = '-1'

    _TESTS = [{
        'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
        'md5': 'bc30d48b91a7179448a0bda465114676',
        'info_dict': {
            'id': '33051',
            'display_id': 'Amateur-Finger-Fuck',
            'ext': 'mp4',
            'title': 'Amateur Finger Fuck',
            'description': 'Amateur solo finger fucking.',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 83,
            'age_limit': 18,
            'uploader': 'cwbike',
            'categories': ['Amateur', 'Anal', 'Fisting', 'Home made', 'Solo'],
        }
    }, {
        'url': 'http://www.empflix.com/videos/[AROMA][ARMD-718]-Aoi-Yoshino-Sawa-25826.html',
        'only_matching': True,
    }, {
        'url': 'https://www.empflix.com/amateur-porn/Amateur-Finger-Fuck/video33051',
        'only_matching': True,
    }]


class MovieFapIE(TNAFlixNetworkBaseIE):
    _VALID_URL = r'https?://(?:www\.)?moviefap\.com/videos/(?P<id>[0-9a-f]+)/(?P<display_id>[^/]+)\.html'

    _VIEW_COUNT_REGEX = r'<br>Views\s*<strong>([\d,.]+)</strong>'
    _COMMENT_COUNT_REGEX = r'<span[^>]+id="comCount"[^>]*>([\d,.]+)</span>'
    _AVERAGE_RATING_REGEX = r'Current Rating\s*<br>\s*<strong>([\d.]+)</strong>'
    _CATEGORIES_REGEX = r'(?s)<div[^>]+id="vid_info"[^>]*>\s*<div[^>]*>.+?</div>(.*?)<br>'

    _TESTS = [{
        # normal, multi-format video
        'url': 'http://www.moviefap.com/videos/be9867c9416c19f54a4a/experienced-milf-amazing-handjob.html',
        'md5': '26624b4e2523051b550067d547615906',
        'info_dict': {
            'id': 'be9867c9416c19f54a4a',
            'display_id': 'experienced-milf-amazing-handjob',
            'ext': 'mp4',
            'title': 'Experienced MILF Amazing Handjob',
            'description': 'Experienced MILF giving an Amazing Handjob',
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
            'uploader': 'darvinfred06',
            'view_count': int,
            'comment_count': int,
            'average_rating': float,
            'categories': ['Amateur', 'Masturbation', 'Mature', 'Flashing'],
        }
    }, {
        # quirky single-format case where the extension is given as fid, but the video is really an flv
        'url': 'http://www.moviefap.com/videos/e5da0d3edce5404418f5/jeune-couple-russe.html',
        'md5': 'fa56683e291fc80635907168a743c9ad',
        'info_dict': {
            'id': 'e5da0d3edce5404418f5',
            'display_id': 'jeune-couple-russe',
            'ext': 'flv',
            'title': 'Jeune Couple Russe',
            'description': 'Amateur',
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
            'uploader': 'whiskeyjar',
            'view_count': int,
            'comment_count': int,
            'average_rating': float,
            'categories': ['Amateur', 'Teen'],
        }
    }]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/toggle.py
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_iso8601,
    sanitized_Request,
)


class ToggleIE(InfoExtractor):
    """Extractor for video.toggle.sg (Mediacorp Toggle, Singapore).

    Media info is fetched from the tvinci 'GetMediaInfo' JSON gateway
    using API credentials scraped from the page (with hardcoded
    defaults as fallback).
    """
    IE_NAME = 'toggle'
    _VALID_URL = r'https?://video\.toggle\.sg/(?:en|zh)/(?:[^/]+/){2,}(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://video.toggle.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115',
        'info_dict': {
            'id': '343115',
            'ext': 'mp4',
            'title': 'Lion Moms Premiere',
            'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b',
            'upload_date': '20150910',
            'timestamp': 1441858274,
        },
        'params': {
            'skip_download': 'm3u8 download',
        }
    }, {
        'note': 'DRM-protected video',
        'url': 'http://video.toggle.sg/en/movies/dug-s-special-mission/341413',
        'info_dict': {
            'id': '341413',
            'ext': 'wvm',
            'title': 'Dug\'s Special Mission',
            'description': 'md5:e86c6f4458214905c1772398fabc93e0',
            'upload_date': '20150827',
            'timestamp': 1440644006,
        },
        'params': {
            'skip_download': 'DRM-protected wvm download',
        }
    }, {
        # this also tests correct video id extraction
        'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay',
        'url': 'http://video.toggle.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861',
        'info_dict': {
            'id': '332861',
            'ext': 'mp4',
            'title': '28th SEA Games (5 Show) -  Episode  11',
            'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa',
            'upload_date': '20150605',
            'timestamp': 1433480166,
        },
        'params': {
            'skip_download': 'DRM-protected wvm download',
        },
        'skip': 'm3u8 links are geo-restricted'
    }, {
        'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
        'only_matching': True,
    }, {
        'url': 'http://video.toggle.sg/zh/series/zero-calling-s2-hd/ep13/336367',
        'only_matching': True,
    }, {
        'url': 'http://video.toggle.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302',
        'only_matching': True,
    }, {
        'url': 'http://video.toggle.sg/en/movies/seven-days/321936',
        'only_matching': True,
    }, {
        'url': 'https://video.toggle.sg/en/tv-show/news/may-2017-cna-singapore-tonight/fri-19-may-2017/512456',
        'only_matching': True,
    }, {
        'url': 'http://video.toggle.sg/en/channels/eleven-plus/401585',
        'only_matching': True,
    }]

    # Negative preferences rank the DRM-protected wvm variants below the
    # plain mp4 streams; lower is worse.
    _FORMAT_PREFERENCES = {
        'wvm-STBMain': -10,
        'wvm-iPadMain': -20,
        'wvm-iPhoneMain': -30,
        'wvm-Android': -40,
    }
    # Fallback API credentials, used when the page does not embed its own.
    _API_USER = 'tvpapi_147'
    _API_PASS = '11111'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            url, video_id, note='Downloading video page')

        # Prefer credentials embedded in the page over the hardcoded ones.
        api_user = self._search_regex(
            r'apiUser\s*:\s*(["\'])(?P<user>.+?)\1', webpage, 'apiUser',
            default=self._API_USER, group='user')
        api_pass = self._search_regex(
            r'apiPass\s*:\s*(["\'])(?P<pass>.+?)\1', webpage, 'apiPass',
            default=self._API_PASS, group='pass')

        # Request body expected by the tvinci GetMediaInfo gateway.
        params = {
            'initObj': {
                'Locale': {
                    'LocaleLanguage': '',
                    'LocaleCountry': '',
                    'LocaleDevice': '',
                    'LocaleUserState': 0
                },
                'Platform': 0,
                'SiteGuid': 0,
                'DomainID': '0',
                'UDID': '',
                'ApiUser': api_user,
                'ApiPass': api_pass
            },
            'MediaID': video_id,
            'mediaType': 0,
        }

        req = sanitized_Request(
            'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
            json.dumps(params).encode('utf-8'))
        info = self._download_json(req, video_id, 'Downloading video info json')

        title = info['MediaName']

        formats = []
        for video_file in info.get('Files', []):
            video_url, vid_format = video_file.get('URL'), video_file.get('Format')
            if not video_url or video_url == 'NA' or not vid_format:
                continue
            ext = determine_ext(video_url)
            vid_format = vid_format.replace(' ', '')
            # if geo-restricted, m3u8 is inaccessible, but mp4 is okay
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, ext='mp4', m3u8_id=vid_format,
                    note='Downloading %s m3u8 information' % vid_format,
                    errnote='Failed to download %s m3u8 information' % vid_format,
                    fatal=False))
            elif ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    video_url, video_id, mpd_id=vid_format,
                    note='Downloading %s MPD manifest' % vid_format,
                    errnote='Failed to download %s MPD manifest' % vid_format,
                    fatal=False))
            elif ext == 'ism':
                formats.extend(self._extract_ism_formats(
                    video_url, video_id, ism_id=vid_format,
                    note='Downloading %s ISM manifest' % vid_format,
                    errnote='Failed to download %s ISM manifest' % vid_format,
                    fatal=False))
            elif ext in ('mp4', 'wvm'):
                # wvm are drm-protected files
                formats.append({
                    'ext': ext,
                    'url': video_url,
                    'format_id': vid_format,
                    'preference': self._FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1,
                    'format_note': 'DRM-protected video' if ext == 'wvm' else None
                })
        if not formats:
            # Most likely because geo-blocked
            raise ExtractorError('No downloadable videos found', expected=True)
        self._sort_formats(formats)

        duration = int_or_none(info.get('Duration'))
        description = info.get('Description')
        created_at = parse_iso8601(info.get('CreationDate') or None)

        average_rating = float_or_none(info.get('Rating'))
        view_count = int_or_none(info.get('ViewCounter') or info.get('view_counter'))
        like_count = int_or_none(info.get('LikeCounter') or info.get('like_counter'))

        thumbnails = []
        for picture in info.get('Pictures', []):
            if not isinstance(picture, dict):
                continue
            pic_url = picture.get('URL')
            if not pic_url:
                continue
            thumbnail = {
                'url': pic_url,
            }
            # PicSize is a 'WxH' string, e.g. '1280x720'.
            pic_size = picture.get('PicSize', '')
            m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size)
            if m:
                thumbnail.update({
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })
            thumbnails.append(thumbnail)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'timestamp': created_at,
            'average_rating': average_rating,
            'view_count': view_count,
            'like_count': like_count,
            'thumbnails': thumbnails,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tonline.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import int_or_none


class TOnlineIE(InfoExtractor):
    """Extractor for videos hosted on t-online.de."""
    IE_NAME = 't-online.de'
    _VALID_URL = r'https?://(?:www\.)?t-online\.de/tv/(?:[^/]+/)*id_(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.t-online.de/tv/sport/fussball/id_79166266/drittes-remis-zidane-es-muss-etwas-passieren-.html',
        'md5': '7d94dbdde5f9d77c5accc73c39632c29',
        'info_dict': {
            'id': '79166266',
            'ext': 'mp4',
            'title': 'Drittes Remis! Zidane: "Es muss etwas passieren"',
            'description': 'Es läuft nicht rund bei Real Madrid. Das 1:1 gegen den SD Eibar war das dritte Unentschieden in Folge in der Liga.',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # JSON endpoint keyed by the numeric id; 'subtitle' carries the
        # actual video title.
        video_data = self._download_json(
            'http://www.t-online.de/tv/id_%s/tid_json_video' % video_id, video_id)
        title = video_data['subtitle']

        formats = []
        for asset in video_data.get('assets', []):
            asset_source = asset.get('source') or asset.get('source2')
            if not asset_source:
                continue
            # Compose a format id like 'type-profile' from whichever
            # fields are present.
            formats_id = []
            for field_key in ('type', 'profile'):
                field_value = asset.get(field_key)
                if field_value:
                    formats_id.append(field_value)
            formats.append({
                'format_id': '-'.join(formats_id),
                'url': asset_source,
            })
        # Rank formats like the other extractors in this project do;
        # without this the raw asset order decides the "best" format.
        self._sort_formats(formats)

        thumbnails = []
        for image in video_data.get('images', []):
            image_source = image.get('source')
            if not image_source:
                continue
            thumbnails.append({
                'url': image_source,
            })

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'duration': int_or_none(video_data.get('duration')),
            'thumbnails': thumbnails,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/toongoggles.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class ToonGogglesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?toongoggles\.com/shows/(?P<show_id>\d+)(?:/[^/]+/episodes/(?P<episode_id>\d+))?' _TESTS = [{ 'url': 'http://www.toongoggles.com/shows/217143/bernard-season-2/episodes/217147/football', 'md5': '18289fc2b951eff6b953a9d8f01e6831', 'info_dict': { 'id': '217147', 'ext': 'mp4', 'title': 'Football', 'uploader_id': '1', 'description': 'Bernard decides to play football in order to be better than Lloyd and tries to beat him no matter how, he even cheats.', 'upload_date': '20160718', 'timestamp': 1468879330, } }, { 'url': 'http://www.toongoggles.com/shows/227759/om-nom-stories-around-the-world', 'info_dict': { 'id': '227759', 'title': 'Om Nom Stories Around The World', }, 'playlist_mincount': 11, }] def _call_api(self, action, page_id, query): query.update({ 'for_ng': 1, 'for_web': 1, 'show_meta': 1, 'version': 7.0, }) return self._download_json('http://api.toongoggles.com/' + action, page_id, query=query) def _parse_episode_data(self, episode_data): title = episode_data['episode_name'] return { '_type': 'url_transparent', 'id': episode_data['episode_id'], 'title': title, 'url': 'kaltura:513551:' + episode_data['entry_id'], 'thumbnail': episode_data.get('thumbnail_url'), 'description': episode_data.get('description'), 'duration': parse_duration(episode_data.get('hms')), 'series': episode_data.get('show_name'), 'season_number': int_or_none(episode_data.get('season_num')), 'episode_id': episode_data.get('episode_id'), 'episode': title, 'episode_number': int_or_none(episode_data.get('episode_num')), 'categories': episode_data.get('categories'), 'ie_key': 'Kaltura', } def _real_extract(self, url): show_id, episode_id = re.match(self._VALID_URL, url).groups() if episode_id: episode_data = self._call_api('search', episode_id, { 'filter': 'episode', 'id': 
episode_id, })['objects'][0] return self._parse_episode_data(episode_data) else: show_data = self._call_api('getepisodesbyshow', show_id, { 'max': 1000000000, 'showid': show_id, }) entries = [] for episode_data in show_data.get('objects', []): entries.append(self._parse_episode_data(episode_data)) return self.playlist_result(entries, show_id, show_data.get('show_name'))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/toutv.py
# coding: utf-8
from __future__ import unicode_literals

import json

from .radiocanada import RadioCanadaIE
from ..compat import compat_HTTPError
from ..utils import (
    ExtractorError,
    int_or_none,
    merge_dicts,
)


class TouTvIE(RadioCanadaIE):
    """Extractor for ici.tou.tv (Radio-Canada's streaming service).

    Inherits the media-validation machinery from RadioCanadaIE; this
    class only handles the tou.tv login and presentation metadata.
    """
    _NETRC_MACHINE = 'toutv'
    IE_NAME = 'tou.tv'
    _VALID_URL = r'https?://ici\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+(?:/S[0-9]+[EC][0-9]+)?)'

    _TESTS = [{
        'url': 'http://ici.tou.tv/garfield-tout-court/S2015E17',
        'info_dict': {
            'id': '122017',
            'ext': 'mp4',
            'title': 'Saison 2015 Épisode 17',
            'description': 'La photo de famille 2',
            'upload_date': '20100717',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        'url': 'http://ici.tou.tv/hackers',
        'only_matching': True,
    }, {
        'url': 'https://ici.tou.tv/l-age-adulte/S01C501',
        'only_matching': True,
    }]
    _CLIENT_KEY = '90505c8d-9c34-4f34-8da1-3a85bdc6d4f4'

    def _real_initialize(self):
        """Log in (when .netrc/credentials are configured) and cache the
        access token and validation claims for later API calls."""
        email, password = self._get_login_info()
        if email is None:
            # Anonymous access: nothing to initialize.
            return
        try:
            self._access_token = self._download_json(
                'https://services.radio-canada.ca/toutv/profiling/accounts/login',
                None, 'Logging in', data=json.dumps({
                    'ClientId': self._CLIENT_KEY,
                    'ClientSecret': '34026772-244b-49b6-8b06-317b30ac9a20',
                    'Email': email,
                    'Password': password,
                    'Scope': 'id.write media-validation.read',
                }).encode(), headers={
                    'Authorization': 'client-key ' + self._CLIENT_KEY,
                    'Content-Type': 'application/json;charset=utf-8',
                })['access_token']
        except ExtractorError as e:
            # A 401 carries a JSON body with a human-readable message;
            # surface it as an expected (non-bug) error.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                error = self._parse_json(e.cause.read().decode(), None)['Message']
                raise ExtractorError(error, expected=True)
            raise
        self._claims = self._call_api('validation/v2/getClaims')['claims']

    def _real_extract(self, url):
        path = self._match_id(url)
        metadata = self._download_json(
            'https://services.radio-canada.ca/toutv/presentation/%s' % path, path, query={
                'client_key': self._CLIENT_KEY,
                'device': 'web',
                'version': 4,
            })
        # IsDrm does not necessarily mean the video is DRM protected (see
        # https://github.com/ytdl-org/youtube-dl/issues/13994).
        if metadata.get('IsDrm'):
            self.report_warning('This video is probably DRM protected.', path)
        video_id = metadata['IdMedia']
        details = metadata['Details']

        # Presentation metadata takes precedence over whatever the base
        # RadioCanada extraction returns (merge_dicts keeps first non-None).
        return merge_dicts({
            'id': video_id,
            'title': details.get('OriginalTitle'),
            'description': details.get('Description'),
            'thumbnail': details.get('ImageUrl'),
            'duration': int_or_none(details.get('LengthInSeconds')),
            'series': metadata.get('ProgramTitle'),
            'season_number': int_or_none(metadata.get('SeasonNumber')),
            'season': metadata.get('SeasonTitle'),
            'episode_number': int_or_none(metadata.get('EpisodeNumber')),
            'episode': metadata.get('EpisodeTitle'),
        }, self._extract_info(metadata.get('AppCode', 'toutv'), video_id))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/toypics.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor import re class ToypicsIE(InfoExtractor): IE_DESC = 'Toypics video' _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)' _TEST = { 'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/', 'md5': '16e806ad6d6f58079d210fe30985e08b', 'info_dict': { 'id': '514', 'ext': 'mp4', 'title': "Chance-Bulge'd, 2", 'age_limit': 18, 'uploader': 'kidsune', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) formats = self._parse_html5_media_entries( url, webpage, video_id)[0]['formats'] title = self._html_search_regex([ r'<h1[^>]+class=["\']view-video-title[^>]+>([^<]+)</h', r'<title>([^<]+) - Toypics</title>', ], webpage, 'title') uploader = self._html_search_regex( r'More videos from <strong>([^<]+)</strong>', webpage, 'uploader', fatal=False) return { 'id': video_id, 'formats': formats, 'title': title, 'uploader': uploader, 'age_limit': 18, } class ToypicsUserIE(InfoExtractor): IE_DESC = 'Toypics user profile' _VALID_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://videos.toypics.net/Mikey', 'info_dict': { 'id': 'Mikey', }, 'playlist_mincount': 19, } def _real_extract(self, url): username = self._match_id(url) profile_page = self._download_webpage( url, username, note='Retrieving profile page') video_count = int(self._search_regex( r'public/">Public Videos \(([0-9]+)\)</a></li>', profile_page, 'video count')) PAGE_SIZE = 8 urls = [] page_count = (video_count + PAGE_SIZE + 1) // PAGE_SIZE for n in range(1, page_count + 1): lpage_url = url + '/public/%d' % n lpage = self._download_webpage( lpage_url, username, note='Downloading page %d/%d' % (n, page_count)) urls.extend( re.findall( r'<div[^>]+class=["\']preview[^>]+>\s*<a[^>]+href="(https?://videos\.toypics\.net/view/[^"]+)"', lpage)) return { '_type': 'playlist', 'id': username, 'entries': [{ '_type': 'url', 'url': eurl, 
'ie_key': 'Toypics', } for eurl in urls] }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/traileraddict.py
from __future__ import unicode_literals import re from .common import InfoExtractor class TrailerAddictIE(InfoExtractor): _WORKING = False _VALID_URL = r'(?:https?://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)' _TEST = { 'url': 'http://www.traileraddict.com/trailer/prince-avalanche/trailer', 'md5': '41365557f3c8c397d091da510e73ceb4', 'info_dict': { 'id': '76184', 'ext': 'mp4', 'title': 'Prince Avalanche Trailer', 'description': 'Trailer for Prince Avalanche.\n\nTwo highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind.', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) name = mobj.group('movie') + '/' + mobj.group('trailer_name') webpage = self._download_webpage(url, name) title = self._search_regex(r'<title>(.+?)</title>', webpage, 'video title').replace(' - Trailer Addict', '') view_count_str = self._search_regex( r'<span class="views_n">([0-9,.]+)</span>', webpage, 'view count', fatal=False) view_count = ( None if view_count_str is None else int(view_count_str.replace(',', ''))) video_id = self._search_regex( r'<param\s+name="movie"\s+value="/emb/([0-9]+)"\s*/>', webpage, 'video id') # Presence of (no)watchplus function indicates HD quality is available if re.search(r'function (no)?watchplus()', webpage): fvar = 'fvarhd' else: fvar = 'fvar' info_url = 'http://www.traileraddict.com/%s.php?tid=%s' % (fvar, str(video_id)) info_webpage = self._download_webpage(info_url, video_id, 'Downloading the info webpage') final_url = self._search_regex(r'&fileurl=(.+)', info_webpage, 'Download url').replace('%3F', '?') thumbnail_url = self._search_regex(r'&image=(.+?)&', info_webpage, 'thumbnail url') description = self._html_search_regex( r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>', webpage, 'description', fatal=False) return 
{ 'id': video_id, 'url': final_url, 'title': title, 'thumbnail': thumbnail_url, 'description': description, 'view_count': view_count, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/trilulilu.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_iso8601,
)


class TriluliluIE(InfoExtractor):
    """Extractor for trilulilu.ro video/audio pages (via the mobile JSON API)."""
    _VALID_URL = r'https?://(?:(?:www|m)\.)?trilulilu\.ro/(?:[^/]+/)?(?P<id>[^/#\?]+)'
    _TESTS = [{
        'url': 'http://www.trilulilu.ro/big-buck-bunny-1',
        'md5': '68da087b676a6196a413549212f60cc6',
        'info_dict': {
            'id': 'ae2899e124140b',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'description': ':) pentru copilul din noi',
            'uploader_id': 'chipy',
            'upload_date': '20120304',
            'timestamp': 1330830647,
            'uploader': 'chipy',
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'http://www.trilulilu.ro/adena-ft-morreti-inocenta',
        'md5': '929dfb8729dc71750463af88bbbbf4a4',
        'info_dict': {
            'id': 'f299710e3c91c5',
            'ext': 'mp4',
            'title': 'Adena ft. Morreti - Inocenta',
            'description': 'pop music',
            'uploader_id': 'VEVOmixt',
            'upload_date': '20151204',
            'uploader': 'VEVOmixt',
            'timestamp': 1449187937,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        media_info = self._download_json('http://m.trilulilu.ro/%s?format=json' % display_id, display_id)

        age_limit = 0
        errors = media_info.get('errors', {})
        if errors.get('friends'):
            raise ExtractorError('This video is private.', expected=True)
        elif errors.get('geoblock'):
            raise ExtractorError('This video is not available in your country.', expected=True)
        elif errors.get('xxx_unlogged'):
            # Adult content behind a login wall: still extractable, but
            # flagged with an age limit.
            age_limit = 18

        media_class = media_info.get('class')
        if media_class not in ('video', 'audio'):
            raise ExtractorError('not a video or an audio')

        user = media_info.get('user', {})

        thumbnail = media_info.get('cover_url')
        if thumbnail:
            # str.format returns a new string; the original discarded the
            # result, leaving the {width}/{height} placeholders in the URL.
            thumbnail = thumbnail.format(width='1600', height='1200')

        # TODO: get correct ext for audio files
        stream_type = media_info.get('stream_type')
        formats = [{
            'url': media_info['href'],
            'ext': stream_type,
        }]
        if media_info.get('is_hd'):
            formats.append({
                'format_id': 'hd',
                'url': media_info['hrefhd'],
                'ext': stream_type,
            })
        if media_class == 'audio':
            formats[0]['vcodec'] = 'none'
        else:
            formats[0]['format_id'] = 'sd'

        return {
            'id': media_info['identifier'].split('|')[1],
            'display_id': display_id,
            'formats': formats,
            'title': media_info['title'],
            'description': media_info.get('description'),
            'thumbnail': thumbnail,
            'uploader_id': user.get('username'),
            'uploader': user.get('fullname'),
            'timestamp': parse_iso8601(media_info.get('published'), ' '),
            'duration': int_or_none(media_info.get('duration')),
            'view_count': int_or_none(media_info.get('count_views')),
            'like_count': int_or_none(media_info.get('count_likes')),
            'comment_count': int_or_none(media_info.get('count_comments')),
            'age_limit': age_limit,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/trunews.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    dict_get,
    float_or_none,
    int_or_none,
    unified_timestamp,
    update_url_query,
    url_or_none,
)


class TruNewsIE(InfoExtractor):
    """Extractor for trunews.com stream pages.

    Resolves the page slug to a Zype video via the Zype API and then
    delegates playback to the Zype extractor (url_transparent result).
    """
    _VALID_URL = r'https?://(?:www\.)?trunews\.com/stream/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://www.trunews.com/stream/will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech',
        'md5': 'a19c024c3906ff954fac9b96ce66bb08',
        'info_dict': {
            'id': '5c5a21e65d3c196e1c0020cc',
            'display_id': 'will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech',
            'ext': 'mp4',
            'title': "Will Democrats Stage a Circus During President Trump's State of the Union Speech?",
            'description': 'md5:c583b72147cc92cf21f56a31aff7a670',
            'duration': 3685,
            'timestamp': 1549411440,
            'upload_date': '20190206',
        },
        'add_ie': ['Zype'],
    }

    def _real_extract(self, url):
        """Look up the video by friendly title on the Zype API and hand off
        extraction to the Zype extractor, enriching the result with metadata
        already present in the API response."""
        display_id = self._match_id(url)

        # Query the Zype catalog by the page slug; the first hit is the video.
        video = self._download_json(
            'https://api.zype.com/videos', display_id, query={
                'app_key': 'PUVKp9WgGUb3-JUw6EqafLx8tFVP6VKZTWbUOR-HOm__g4fNDt1bCsm_LgYf_k9H',
                'per_page': 1,
                'active': 'true',
                'friendly_title': display_id,
            })['response'][0]

        zype_id = video['_id']

        # Collect validated thumbnail entries; tolerate malformed items.
        thumbnails = []
        thumbnails_list = video.get('thumbnails')
        if isinstance(thumbnails_list, list):
            for thumbnail in thumbnails_list:
                if not isinstance(thumbnail, dict):
                    continue
                thumbnail_url = url_or_none(thumbnail.get('url'))
                if not thumbnail_url:
                    continue
                thumbnails.append({
                    'url': thumbnail_url,
                    'width': int_or_none(thumbnail.get('width')),
                    'height': int_or_none(thumbnail.get('height')),
                })

        return {
            # url_transparent: the Zype extractor does the actual format
            # extraction, our metadata below overlays its result.
            '_type': 'url_transparent',
            'url': update_url_query(
                'https://player.zype.com/embed/%s.js' % zype_id,
                {'api_key': 'X5XnahkjCwJrT_l5zUqypnaLEObotyvtUKJWWlONxDoHVjP8vqxlArLV8llxMbyt'}),
            'ie_key': 'Zype',
            'id': zype_id,
            'display_id': display_id,
            'title': video.get('title'),
            'description': dict_get(video, ('description', 'ott_description', 'short_description')),
            'duration': int_or_none(video.get('duration')),
            'timestamp': unified_timestamp(video.get('published_at')),
            'average_rating': float_or_none(video.get('rating')),
            'view_count': int_or_none(video.get('request_count')),
            'thumbnails': thumbnails,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/trutv.py
# coding: utf-8 from __future__ import unicode_literals import re from .turner import TurnerBaseIE from ..utils import ( int_or_none, parse_iso8601, ) class TruTVIE(TurnerBaseIE): _VALID_URL = r'https?://(?:www\.)?trutv\.com/(?:shows|full-episodes)/(?P<series_slug>[0-9A-Za-z-]+)/(?:videos/(?P<clip_slug>[0-9A-Za-z-]+)|(?P<id>\d+))' _TEST = { 'url': 'https://www.trutv.com/shows/the-carbonaro-effect/videos/sunlight-activated-flower.html', 'info_dict': { 'id': 'f16c03beec1e84cd7d1a51f11d8fcc29124cc7f1', 'ext': 'mp4', 'title': 'Sunlight-Activated Flower', 'description': "A customer is stunned when he sees Michael's sunlight-activated flower.", }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): series_slug, clip_slug, video_id = re.match(self._VALID_URL, url).groups() if video_id: path = 'episode' display_id = video_id else: path = 'series/clip' display_id = clip_slug data = self._download_json( 'https://api.trutv.com/v2/web/%s/%s/%s' % (path, series_slug, display_id), display_id) video_data = data['episode'] if video_id else data['info'] media_id = video_data['mediaId'] title = video_data['title'].strip() info = self._extract_ngtv_info( media_id, {}, { 'url': url, 'site_name': 'truTV', 'auth_required': video_data.get('isAuthRequired'), }) thumbnails = [] for image in video_data.get('images', []): image_url = image.get('srcUrl') if not image_url: continue thumbnails.append({ 'url': image_url, 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), }) info.update({ 'id': media_id, 'display_id': display_id, 'title': title, 'description': video_data.get('description'), 'thumbnails': thumbnails, 'timestamp': parse_iso8601(video_data.get('publicationDate')), 'series': video_data.get('showTitle'), 'season_number': int_or_none(video_data.get('seasonNum')), 'episode_number': int_or_none(video_data.get('episodeNum')), }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tube8.py
from __future__ import unicode_literals import re from ..utils import ( int_or_none, str_to_int, ) from .keezmovies import KeezMoviesIE class Tube8IE(KeezMoviesIE): _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/', 'md5': '65e20c48e6abff62ed0c3965fff13a39', 'info_dict': { 'id': '229795', 'display_id': 'kasia-music-video', 'ext': 'mp4', 'description': 'hot teen Kasia grinding', 'uploader': 'unknown', 'title': 'Kasia music video', 'age_limit': 18, 'duration': 230, 'categories': ['Teen'], 'tags': ['dancing'], }, }, { 'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)', webpage) def _real_extract(self, url): webpage, info = self._extract_info(url) if not info['title']: info['title'] = self._html_search_regex( r'videoTitle\s*=\s*"([^"]+)', webpage, 'title') description = self._html_search_regex( r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False) uploader = self._html_search_regex( r'<span class="username">\s*(.+?)\s*<', webpage, 'uploader', fatal=False) like_count = int_or_none(self._search_regex( r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False)) dislike_count = int_or_none(self._search_regex( r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False)) view_count = str_to_int(self._search_regex( r'Views:\s*</dt>\s*<dd>([\d,\.]+)', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._search_regex( r'<span id="allCommentsCount">(\d+)</span>', webpage, 'comment count', fatal=False)) category = self._search_regex( r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)', webpage, 'category', fatal=False) categories = [category] if category else None 
tags_str = self._search_regex( r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)', webpage, 'tags', fatal=False) tags = [t for t in re.findall( r'<a[^>]+href=[^>]+>([^<]+)', tags_str)] if tags_str else None info.update({ 'description': description, 'uploader': uploader, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, 'categories': categories, 'tags': tags, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tubitv.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    sanitized_Request,
    urlencode_postdata,
)


class TubiTvIE(InfoExtractor):
    """Extractor for tubitv.com videos, movies and TV-show episodes.

    Supports optional account login (``.netrc`` machine ``tubitv``).
    """
    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?:video|movies|tv-shows)/(?P<id>[0-9]+)'
    _LOGIN_URL = 'http://tubitv.com/login'
    _NETRC_MACHINE = 'tubitv'
    _GEO_COUNTRIES = ['US']
    _TESTS = [{
        'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday',
        'md5': '43ac06be9326f41912dc64ccf7a80320',
        'info_dict': {
            'id': '283829',
            'ext': 'mp4',
            'title': 'The Comedian at The Friday',
            'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.',
            'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434',
        },
    }, {
        'url': 'http://tubitv.com/tv-shows/321886/s01_e01_on_nom_stories',
        'only_matching': True,
    }, {
        'url': 'http://tubitv.com/movies/383676/tracker',
        'only_matching': True,
    }]

    def _login(self):
        """POST credentials to the login form; success is detected by the
        presence of the logout element in the returned page."""
        username, password = self._get_login_info()
        if username is None:
            return
        self.report_login()
        form_data = {
            'username': username,
            'password': password,
        }
        payload = urlencode_postdata(form_data)
        request = sanitized_Request(self._LOGIN_URL, payload)
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_page = self._download_webpage(
            request, None, False, 'Wrong login info')
        if not re.search(r'id="tubi-logout"', login_page):
            raise ExtractorError(
                'Login failed (invalid username/password)', expected=True)

    def _real_initialize(self):
        # Log in once before any extraction, if credentials were supplied.
        self._login()

    def _real_extract(self, url):
        """Fetch the video JSON from the 'oz' API and build HLS formats,
        thumbnails and subtitles from it."""
        video_id = self._match_id(url)

        video_data = self._download_json(
            'http://tubitv.com/oz/videos/%s/content' % video_id, video_id)
        title = video_data['title']

        formats = self._extract_m3u8_formats(
            self._proto_relative_url(video_data['url']),
            video_id, 'mp4', 'm3u8_native')
        self._sort_formats(formats)

        thumbnails = []
        for thumbnail_url in video_data.get('thumbnails', []):
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': self._proto_relative_url(thumbnail_url),
            })

        subtitles = {}
        for sub in video_data.get('subtitles', []):
            sub_url = sub.get('url')
            if not sub_url:
                continue
            # Unlabelled subtitle tracks default to English.
            subtitles.setdefault(sub.get('lang', 'English'), []).append({
                'url': self._proto_relative_url(sub_url),
            })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'description': video_data.get('description'),
            'duration': int_or_none(video_data.get('duration')),
            'uploader_id': video_data.get('publisher_id'),
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tudou.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class TudouPlaylistIE(InfoExtractor): IE_NAME = 'tudou:playlist' _VALID_URL = r'https?://(?:www\.)?tudou\.com/listplay/(?P<id>[\w-]{11})\.html' _TESTS = [{ 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo.html', 'info_dict': { 'id': 'zzdE77v6Mmo', }, 'playlist_mincount': 209, }] def _real_extract(self, url): playlist_id = self._match_id(url) playlist_data = self._download_json( 'http://www.tudou.com/tvp/plist.action?lcode=%s' % playlist_id, playlist_id) entries = [self.url_result( 'http://www.tudou.com/programs/view/%s' % item['icode'], 'Tudou', item['icode'], item['kw']) for item in playlist_data['items']] return self.playlist_result(entries, playlist_id) class TudouAlbumIE(InfoExtractor): IE_NAME = 'tudou:album' _VALID_URL = r'https?://(?:www\.)?tudou\.com/album(?:cover|play)/(?P<id>[\w-]{11})' _TESTS = [{ 'url': 'http://www.tudou.com/albumplay/v5qckFJvNJg.html', 'info_dict': { 'id': 'v5qckFJvNJg', }, 'playlist_mincount': 45, }] def _real_extract(self, url): album_id = self._match_id(url) album_data = self._download_json( 'http://www.tudou.com/tvp/alist.action?acode=%s' % album_id, album_id) entries = [self.url_result( 'http://www.tudou.com/programs/view/%s' % item['icode'], 'Tudou', item['icode'], item['kw']) for item in album_data['items']] return self.playlist_result(entries, album_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tumblr.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    ExtractorError,
    int_or_none,
    urlencode_postdata
)


class TumblrIE(InfoExtractor):
    """Extractor for tumblr.com post/video pages.

    Handles optional account login (needed for safe-mode content) and
    delegates posts embedding third-party players to the generic extractor.
    """
    _VALID_URL = r'https?://(?P<blog_name>[^/?#&]+)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
    _NETRC_MACHINE = 'tumblr'
    _LOGIN_URL = 'https://www.tumblr.com/login'
    _TESTS = [{
        'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
        'md5': '479bb068e5b16462f5176a6828829767',
        'info_dict': {
            'id': '54196191430',
            'ext': 'mp4',
            'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
            'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
            'thumbnail': r're:http://.*\.jpg',
        }
    }, {
        'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
        'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
        'info_dict': {
            'id': '90208453769',
            'ext': 'mp4',
            'title': '5SOS STRUM ;]',
            'description': 'md5:dba62ac8639482759c8eb10ce474586a',
            'thumbnail': r're:http://.*\.jpg',
        }
    }, {
        'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video',
        'md5': '7ae503065ad150122dc3089f8cf1546c',
        'info_dict': {
            'id': '130323439814',
            'ext': 'mp4',
            'title': 'HD Video Testing \u2014 Test description for my HD video',
            'description': 'md5:97cc3ab5fcd27ee4af6356701541319c',
            'thumbnail': r're:http://.*\.jpg',
        },
        'params': {
            'format': 'hd',
        },
    }, {
        'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
        'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
        'info_dict': {
            'id': 'Wmur',
            'ext': 'mp4',
            'title': 'naked smoking & stretching',
            'upload_date': '20150506',
            'timestamp': 1430931613,
            'age_limit': 18,
            'uploader_id': '1638622',
            'uploader': 'naked-yogi',
        },
        'add_ie': ['Vidme'],
    }, {
        'url': 'http://camdamage.tumblr.com/post/98846056295/',
        'md5': 'a9e0c8371ea1ca306d6554e3fecf50b6',
        'info_dict': {
            'id': '105463834',
            'ext': 'mp4',
            'title': 'Cam Damage-HD 720p',
            'uploader': 'John Moyer',
            'uploader_id': 'user32021558',
        },
        'add_ie': ['Vimeo'],
    }, {
        'url': 'http://sutiblr.tumblr.com/post/139638707273',
        'md5': '2dd184b3669e049ba40563a7d423f95c',
        'info_dict': {
            'id': 'ir7qBEIKqvq',
            'ext': 'mp4',
            'title': 'Vine by sutiblr',
            'alt_title': 'Vine by sutiblr',
            'uploader': 'sutiblr',
            'uploader_id': '1198993975374495744',
            'upload_date': '20160220',
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
        'add_ie': ['Vine'],
    }, {
        'url': 'http://vitasidorkina.tumblr.com/post/134652425014/joskriver-victoriassecret-invisibility-or',
        'md5': '01c12ceb82cbf6b2fe0703aa56b3ad72',
        'info_dict': {
            'id': '-7LnUPGlSo',
            'ext': 'mp4',
            'title': 'Video by victoriassecret',
            'description': 'Invisibility or flight…which superpower would YOU choose? #VSFashionShow #ThisOrThat',
            'uploader_id': 'victoriassecret',
            'thumbnail': r're:^https?://.*\.jpg'
        },
        'add_ie': ['Instagram'],
    }]

    def _real_initialize(self):
        # Log in once before any extraction, if credentials were supplied.
        self._login()

    def _login(self):
        """Submit the hidden login form with the user's credentials.

        Success is detected by a redirect to /dashboard; otherwise any
        JS-embedded RegistrationForm.errors entry is surfaced to the user.
        """
        username, password = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'user[email]': username,
            'user[password]': password
        })

        response, urlh = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata(login_form), headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': self._LOGIN_URL,
            })

        # Successful login
        if '/dashboard' in urlh.geturl():
            return

        login_errors = self._parse_json(
            self._search_regex(
                r'RegistrationForm\.errors\s*=\s*(\[.+?\])\s*;', response,
                'login errors', default='[]'),
            None, fatal=False)
        if login_errors:
            raise ExtractorError(
                'Unable to login: %s' % login_errors[0], expected=True)

        self.report_warning('Login has probably failed')

    def _real_extract(self, url):
        """Extract a video from a tumblr post.

        Posts with no native tumblr video iframe are handed to the generic
        extractor (third-party embeds: Vine, Vimeo, Instagram, ...).
        """
        m_url = re.match(self._VALID_URL, url)
        video_id = m_url.group('id')
        blog = m_url.group('blog_name')

        # Canonicalize to the /post/ URL regardless of which form was given.
        url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
        webpage, urlh = self._download_webpage_handle(url, video_id)

        redirect_url = compat_str(urlh.geturl())
        if 'tumblr.com/safe-mode' in redirect_url or redirect_url.startswith('/safe-mode'):
            raise ExtractorError(
                'This Tumblr may contain sensitive media. '
                'Disable safe mode in your account settings '
                'at https://www.tumblr.com/settings/account#safe_mode',
                expected=True)

        iframe_url = self._search_regex(
            r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
            webpage, 'iframe url', default=None)
        if iframe_url is None:
            # No native player — let the generic extractor handle embeds.
            return self.url_result(redirect_url, 'Generic')

        iframe = self._download_webpage(
            iframe_url, video_id, 'Downloading iframe page')

        duration = None
        sources = []

        # SD stream comes from the <source> tag of the player iframe.
        sd_url = self._search_regex(
            r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe,
            'sd video url', default=None, group='url')
        if sd_url:
            sources.append((sd_url, 'sd'))

        # HD stream (and duration) live in the data-crt-options JSON blob.
        options = self._parse_json(
            self._search_regex(
                r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe,
                'hd video url', default='', group='options'),
            video_id, fatal=False)
        if options:
            duration = int_or_none(options.get('duration'))
            hd_url = options.get('hdUrl')
            if hd_url:
                sources.append((hd_url, 'hd'))

        # List order doubles as the quality ranking (sd before hd).
        formats = [{
            'url': video_url,
            'ext': 'mp4',
            'format_id': format_id,
            'height': int_or_none(self._search_regex(
                r'/(\d{3,4})$', video_url, 'height', default=None)),
            'quality': quality,
        } for quality, (video_url, format_id) in enumerate(sources)]

        self._sort_formats(formats)

        # The only place where you can get a title, it's not complete,
        # but searching in other places doesn't work for all videos
        video_title = self._html_search_regex(
            r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
            webpage, 'title')

        return {
            'id': video_id,
            'title': video_title,
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tunein.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import ExtractorError
from ..compat import compat_urlparse


class TuneInBaseIE(InfoExtractor):
    """Shared logic for all tunein.com extractors.

    Subclasses only set _API_URL_QUERY (the tuner query string for their
    content type); everything else — metadata fetch, stream listing and
    format construction — lives here.
    """
    _API_BASE_URL = 'http://tunein.com/tuner/tune/'

    @staticmethod
    def _extract_urls(webpage):
        # Find tunein embed-player iframes in third-party pages.
        return re.findall(
            r'<iframe[^>]+src=["\'](?P<url>(?:https?://)?tunein\.com/embed/player/[pst]\d+)',
            webpage)

    def _real_extract(self, url):
        content_id = self._match_id(url)

        content_info = self._download_json(
            self._API_BASE_URL + self._API_URL_QUERY % content_id,
            content_id, note='Downloading JSON metadata')

        title = content_info['Title']
        thumbnail = content_info.get('Logo')
        location = content_info.get('Location')
        streams_url = content_info.get('StreamUrl')
        if not streams_url:
            raise ExtractorError('No downloadable streams found', expected=True)
        if not streams_url.startswith('http://'):
            # Relative stream URLs are resolved against the page URL.
            streams_url = compat_urlparse.urljoin(url, streams_url)

        # The endpoint returns JSONP; strip the '(...)' wrapper before parsing.
        streams = self._download_json(
            streams_url, content_id, note='Downloading stream data',
            transform_source=lambda s: re.sub(r'^\s*\((.*)\);\s*$', r'\1', s))['Streams']

        is_live = None
        formats = []
        for stream in streams:
            if stream.get('Type') == 'Live':
                is_live = True
            reliability = stream.get('Reliability')
            format_note = (
                'Reliability: %d%%' % reliability
                if reliability is not None else None)
            formats.append({
                # Streams reported at <= 90% reliability are de-prioritized.
                'preference': (
                    0 if reliability is None or reliability > 90
                    else 1),
                'abr': stream.get('Bandwidth'),
                'ext': stream.get('MediaType').lower(),
                'acodec': stream.get('MediaType'),
                'vcodec': 'none',
                'url': stream.get('Url'),
                'source_preference': reliability,
                'format_note': format_note,
            })
        self._sort_formats(formats)

        return {
            'id': content_id,
            'title': self._live_title(title) if is_live else title,
            'formats': formats,
            'thumbnail': thumbnail,
            'location': location,
            'is_live': is_live,
        }


class TuneInClipIE(TuneInBaseIE):
    """tunein.com audio clips (audioClipId URLs)."""
    IE_NAME = 'tunein:clip'
    _VALID_URL = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)'
    _API_URL_QUERY = '?tuneType=AudioClip&audioclipId=%s'

    _TESTS = [{
        'url': 'http://tunein.com/station/?stationId=246119&audioClipId=816',
        'md5': '99f00d772db70efc804385c6b47f4e77',
        'info_dict': {
            'id': '816',
            'title': '32m',
            'ext': 'mp3',
        },
    }]


class TuneInStationIE(TuneInBaseIE):
    """tunein.com radio stations (live streams)."""
    IE_NAME = 'tunein:station'
    _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId=|embed/player/s)(?P<id>\d+)'
    _API_URL_QUERY = '?tuneType=Station&stationId=%s'

    @classmethod
    def suitable(cls, url):
        # Station URLs that also carry an audioClipId belong to TuneInClipIE.
        return False if TuneInClipIE.suitable(url) else super(TuneInStationIE, cls).suitable(url)

    _TESTS = [{
        'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
        'info_dict': {
            'id': '34682',
            'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
            'ext': 'mp3',
            'location': 'Tacoma, WA',
        },
        'params': {
            'skip_download': True,  # live stream
        },
    }, {
        'url': 'http://tunein.com/embed/player/s6404/',
        'only_matching': True,
    }]


class TuneInProgramIE(TuneInBaseIE):
    """tunein.com programs/shows."""
    IE_NAME = 'tunein:program'
    _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-p|program/.*?ProgramId=|embed/player/p)(?P<id>\d+)'
    _API_URL_QUERY = '?tuneType=Program&programId=%s'

    _TESTS = [{
        'url': 'http://tunein.com/radio/Jazz-24-p2506/',
        'info_dict': {
            'id': '2506',
            'title': 'Jazz 24 on 91.3 WUKY-HD3',
            'ext': 'mp3',
            'location': 'Lexington, KY',
        },
        'params': {
            'skip_download': True,  # live stream
        },
    }, {
        'url': 'http://tunein.com/embed/player/p191660/',
        'only_matching': True,
    }]


class TuneInTopicIE(TuneInBaseIE):
    """tunein.com topics (single recorded items)."""
    IE_NAME = 'tunein:topic'
    _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:topic/.*?TopicId=|embed/player/t)(?P<id>\d+)'
    _API_URL_QUERY = '?tuneType=Topic&topicId=%s'

    _TESTS = [{
        'url': 'http://tunein.com/topic/?TopicId=101830576',
        'md5': 'c31a39e6f988d188252eae7af0ef09c9',
        'info_dict': {
            'id': '101830576',
            'title': 'Votez pour moi du 29 octobre 2015 (29/10/15)',
            'ext': 'mp3',
            'location': 'Belgium',
        },
    }, {
        'url': 'http://tunein.com/embed/player/t101830576/',
        'only_matching': True,
    }]


class TuneInShortenerIE(InfoExtractor):
    """Resolver for tun.in short links: follows the redirect and re-dispatches
    the resulting URL to the appropriate tunein extractor."""
    IE_NAME = 'tunein:shortener'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://tun\.in/(?P<id>[A-Za-z0-9]+)'

    _TEST = {
        # test redirection
        'url': 'http://tun.in/ser7s',
        'info_dict': {
            'id': '34682',
            'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
            'ext': 'mp3',
            'location': 'Tacoma, WA',
        },
        'params': {
            'skip_download': True,  # live stream
        },
    }

    def _real_extract(self, url):
        redirect_id = self._match_id(url)
        # The server doesn't support HEAD requests
        urlh = self._request_webpage(
            url, redirect_id, note='Downloading redirect page')
        url = urlh.geturl()
        self.to_screen('Following redirect: %s' % url)
        return self.url_result(url)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tunepk.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    try_get,
    unified_timestamp,
)


class TunePkIE(InfoExtractor):
    """Extractor for tune.pk videos (site pages, player URLs and embeds).

    Metadata is read from the TunePlayer JS initializer embedded in the
    watch page; formats are JWPlayer sources.
    """
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?tune\.pk/(?:video/|player/embed_player.php?.*?\bvid=)|
                            embed\.tune\.pk/play/
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'https://tune.pk/video/6919541/maudie-2017-international-trailer-1-ft-ethan-hawke-sally-hawkins',
        'md5': '0c537163b7f6f97da3c5dd1e3ef6dd55',
        'info_dict': {
            'id': '6919541',
            'ext': 'mp4',
            'title': 'Maudie (2017) | International Trailer # 1 ft Ethan Hawke, Sally Hawkins',
            'description': 'md5:eb5a04114fafef5cec90799a93a2d09c',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1487327564,
            'upload_date': '20170217',
            'uploader': 'Movie Trailers',
            'duration': 107,
            'view_count': int,
        }
    }, {
        'url': 'https://tune.pk/player/embed_player.php?vid=6919541&folder=2017/02/17/&width=600&height=350&autoplay=no',
        'only_matching': True,
    }, {
        'url': 'https://embed.tune.pk/play/6919541?autoplay=no&ssl=yes&inline=true',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Always fetch the canonical watch page, even for embed/player URLs.
        webpage = self._download_webpage(
            'https://tune.pk/video/%s' % video_id, video_id)

        # The player is initialized with a JSON settings object; 'details'
        # carries video metadata and the JWPlayer source list.
        details = self._parse_json(
            self._search_regex(
                r'new\s+TunePlayer\(({.+?})\)\s*;\s*\n', webpage, 'tune player'),
            video_id)['details']

        video = details['video']
        title = video.get('title') or self._og_search_title(
            webpage, default=None) or self._html_search_meta(
            'title', webpage, 'title', fatal=True)

        formats = self._parse_jwplayer_formats(
            details['player']['sources'], video_id)
        self._sort_formats(formats)

        description = self._og_search_description(
            webpage, default=None) or self._html_search_meta(
            'description', webpage, 'description')

        thumbnail = video.get('thumb') or self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'thumbnail', webpage, 'thumbnail')

        timestamp = unified_timestamp(video.get('date_added'))
        uploader = try_get(
            video, lambda x: x['uploader']['name'],
            compat_str) or self._html_search_meta('author', webpage, 'author')

        duration = int_or_none(video.get('duration'))
        view_count = int_or_none(video.get('views'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader': uploader,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/turbo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, qualities, xpath_text, ) class TurboIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-' _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}' _TEST = { 'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html', 'md5': '33f4b91099b36b5d5a91f84b5bcba600', 'info_dict': { 'id': '454443', 'ext': 'mp4', 'duration': 3715, 'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ', 'description': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...', 'thumbnail': r're:^https?://.*\.jpg$', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) playlist = self._download_xml(self._API_URL.format(video_id), video_id) item = playlist.find('./channel/item') if item is None: raise ExtractorError('Playlist item was not found', expected=True) title = xpath_text(item, './title', 'title') duration = int_or_none(xpath_text(item, './durate', 'duration')) thumbnail = xpath_text(item, './visuel_clip', 'thumbnail') description = self._html_search_meta('description', webpage) formats = [] get_quality = qualities(['3g', 'sd', 'hq']) for child in item: m = re.search(r'url_video_(?P<quality>.+)', child.tag) if m: quality = compat_str(m.group('quality')) formats.append({ 'format_id': quality, 'url': child.text, 'quality': get_quality(quality), }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'duration': duration, 'thumbnail': thumbnail, 'description': description, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/turner.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
    xpath_text,
    int_or_none,
    determine_ext,
    float_or_none,
    parse_duration,
    xpath_attr,
    update_url_query,
    ExtractorError,
    strip_or_none,
    url_or_none,
)


class TurnerBaseIE(AdobePassIE):
    """Shared base for Turner-network extractors (CNN, truTV, ...).

    Provides Akamai SPE tokenization plus two extraction pipelines:
    the XML-based CVP feed and the JSON-based NGTV API.
    """

    # Class-level cache mapping a secured Akamai path to its SPE token, so
    # multiple files under the same path reuse one tokenizer request.
    _AKAMAI_SPE_TOKEN_CACHE = {}

    def _extract_timestamp(self, video_data):
        # Unix timestamp lives in the 'uts' attribute of <dateCreated>.
        return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))

    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
        """Append an Akamai SPE '?hdnea=' token to video_url.

        Tokens are cached per secure path. If the tokenizer returns no
        token, the URL is returned unmodified; a tokenizer error message
        aborts extraction.
        """
        secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
        token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
        if not token:
            query = {
                'path': secure_path,
            }
            if custom_tokenizer_query:
                query.update(custom_tokenizer_query)
            else:
                query['videoId'] = content_id
            if ap_data.get('auth_required'):
                # TV-provider (Adobe Pass) authentication token.
                query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
            auth = self._download_xml(
                tokenizer_src, content_id, query=query)
            error_msg = xpath_text(auth, 'error/msg')
            if error_msg:
                raise ExtractorError(error_msg, expected=True)
            token = xpath_text(auth, 'token')
            if not token:
                return video_url
            self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
        return video_url + '?hdnea=' + token

    # NOTE(review): mutable default arguments below are only ever read
    # (.get()), never mutated, so sharing one dict across calls is benign
    # here — but keep it that way.
    def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}):
        """Extract formats, subtitles and metadata from a CVP XML feed.

        path_data supplies per-extension media_src prefixes and the
        'secure' tokenizer configuration; ap_data carries Adobe Pass info.
        """
        video_data = self._download_xml(data_src, video_id)
        video_id = video_data.attrib['id']
        title = xpath_text(video_data, 'headline', fatal=True)
        content_id = xpath_text(video_data, 'contentId') or video_id
        # rtmp_src = xpath_text(video_data, 'akamai/src')
        # if rtmp_src:
        #     splited_rtmp_src = rtmp_src.split(',')
        #     if len(splited_rtmp_src) == 2:
        #         rtmp_src = splited_rtmp_src[1]
        # aifp = xpath_text(video_data, 'akamai/aifp', default='')
        urls = []
        formats = []
        # format_id strings like '640x360_1200' encode WxH and bitrate.
        rex = re.compile(
            r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
        # Possible formats locations: files/file, files/groupFiles/files
        # and maybe others
        for video_file in video_data.findall('.//file'):
            video_url = video_file.text.strip()
            if not video_url:
                continue
            ext = determine_ext(video_url)
            if video_url.startswith('/mp4:protected/'):
                continue
                # TODO Correct extraction for these files
                # protected_path_data = path_data.get('protected')
                # if not protected_path_data or not rtmp_src:
                #     continue
                # protected_path = self._search_regex(
                #     r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
                # auth = self._download_webpage(
                #     protected_path_data['tokenizer_src'], query={
                #         'path': protected_path,
                #         'videoId': content_id,
                #         'aifp': aifp,
                #     })
                # token = xpath_text(auth, 'token')
                # if not token:
                #     continue
                # video_url = rtmp_src + video_url + '?' + token
            elif video_url.startswith('/secure/'):
                # Secured path: needs an SPE token appended.
                secure_path_data = path_data.get('secure')
                if not secure_path_data:
                    continue
                video_url = self._add_akamai_spe_token(
                    secure_path_data['tokenizer_src'],
                    secure_path_data['media_src'] + video_url,
                    content_id, ap_data)
            elif not re.match('https?://', video_url):
                # Relative path: prefix with the media source for this ext.
                base_path_data = path_data.get(ext, path_data.get('default', {}))
                media_src = base_path_data.get('media_src')
                if not media_src:
                    continue
                video_url = media_src + video_url
            if video_url in urls:
                continue  # de-duplicate
            urls.append(video_url)
            format_id = video_file.get('bitrate')
            if ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    video_url, video_id, fatal=False))
            elif ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    video_url, video_id, 'mp4',
                    m3u8_id=format_id or 'hls', fatal=False)
                if '/secure/' in video_url and '?hdnea=' in video_url:
                    # Tokenized HLS streams cannot be seeked into.
                    for f in m3u8_formats:
                        f['_seekable'] = False
                formats.extend(m3u8_formats)
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    update_url_query(video_url, {'hdcore': '3.7.0'}),
                    video_id, f4m_id=format_id or 'hds', fatal=False))
            else:
                f = {
                    'format_id': format_id,
                    'url': video_url,
                    'ext': ext,
                }
                # Try to pull dimensions/bitrate out of the id or the URL.
                mobj = rex.search(format_id + video_url)
                if mobj:
                    f.update({
                        'width': int(mobj.group('width')),
                        'height': int(mobj.group('height')),
                        'tbr': int_or_none(mobj.group('bitrate')),
                    })
                elif isinstance(format_id, compat_str):
                    if format_id.isdigit():
                        f['tbr'] = int(format_id)
                    else:
                        mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
                        if mobj:
                            if mobj.group(1) == 'audio':
                                f.update({
                                    'vcodec': 'none',
                                    'ext': 'm4a',
                                })
                            else:
                                f['tbr'] = int(mobj.group(1))
                formats.append(f)
        self._sort_formats(formats)

        subtitles = {}
        for source in video_data.findall('closedCaptions/source'):
            for track in source.findall('track'):
                track_url = url_or_none(track.get('url'))
                if not track_url or track_url.endswith('/big'):
                    continue
                lang = track.get('lang') or track.get('label') or 'en'
                subtitles.setdefault(lang, []).append({
                    'url': track_url,
                    'ext': {
                        'scc': 'scc',
                        'webvtt': 'vtt',
                        'smptett': 'tt',
                    }.get(source.get('format'))
                })

        thumbnails = [{
            'id': image.get('cut'),
            'url': image.text,
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in video_data.findall('images/image')]

        is_live = xpath_text(video_data, 'isLive') == 'true'

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'thumbnail': xpath_text(video_data, 'poster'),
            'description': strip_or_none(xpath_text(video_data, 'description')),
            'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
            'timestamp': self._extract_timestamp(video_data),
            'upload_date': xpath_attr(video_data, 'metas', 'version'),
            'series': xpath_text(video_data, 'showTitle'),
            'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
            'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
            'is_live': is_live,
        }

    def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
        """Extract HLS formats, chapters and duration from the NGTV API.

        Tries both the 'unprotected' and 'bulkaes' stream variants; SPE
        protection is resolved via _add_akamai_spe_token.
        """
        streams_data = self._download_json(
            'http://medium.ngtv.io/media/%s/tv' % media_id,
            media_id)['media']['tv']
        duration = None
        chapters = []
        formats = []
        for supported_type in ('unprotected', 'bulkaes'):
            stream_data = streams_data.get(supported_type, {})
            m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
            if not m3u8_url:
                continue
            if stream_data.get('playlistProtection') == 'spe':
                m3u8_url = self._add_akamai_spe_token(
                    'http://token.ngtv.io/token/token_spe',
                    m3u8_url, media_id, ap_data or {}, tokenizer_query)
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))

            duration = float_or_none(stream_data.get('totalRuntime'))

            # Chapters are identical across variants; build them only once.
            if not chapters:
                for chapter in stream_data.get('contentSegments', []):
                    start_time = float_or_none(chapter.get('start'))
                    chapter_duration = float_or_none(chapter.get('duration'))
                    if start_time is None or chapter_duration is None:
                        continue
                    chapters.append({
                        'start_time': start_time,
                        'end_time': start_time + chapter_duration,
                    })
        self._sort_formats(formats)

        return {
            'formats': formats,
            'chapters': chapters,
            'duration': duration,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tv2.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, float_or_none, js_to_json, parse_iso8601, remove_end, try_get, ) class TV2IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tv2\.no/v/(?P<id>\d+)' _TEST = { 'url': 'http://www.tv2.no/v/916509/', 'info_dict': { 'id': '916509', 'ext': 'mp4', 'title': 'Se Frode Gryttens hyllest av Steven Gerrard', 'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.', 'timestamp': 1431715610, 'upload_date': '20150515', 'duration': 156.967, 'view_count': int, 'categories': list, }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) formats = [] format_urls = [] for protocol in ('HDS', 'HLS'): data = self._download_json( 'http://sumo.tv2.no/api/web/asset/%s/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % (video_id, protocol), video_id, 'Downloading play JSON')['playback'] items = try_get(data, lambda x: x['items']['item']) if not items: continue if not isinstance(items, list): items = [items] for item in items: if not isinstance(item, dict): continue video_url = item.get('url') if not video_url or video_url in format_urls: continue format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat')) if not self._is_valid_url(video_url, video_id, format_id): continue format_urls.append(video_url) ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id=format_id, fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)) elif ext == 'ism' or video_url.endswith('.ism/Manifest'): pass else: formats.append({ 'url': video_url, 'format_id': format_id, 'tbr': int_or_none(item.get('bitrate')), 'filesize': int_or_none(item.get('fileSize')), }) self._sort_formats(formats) asset = 
self._download_json( 'http://sumo.tv2.no/api/web/asset/%s.json' % video_id, video_id, 'Downloading metadata JSON')['asset'] title = asset['title'] description = asset.get('description') timestamp = parse_iso8601(asset.get('createTime')) duration = float_or_none(asset.get('accurateDuration') or asset.get('duration')) view_count = int_or_none(asset.get('views')) categories = asset.get('keywords', '').split(',') thumbnails = [{ 'id': thumbnail.get('@type'), 'url': thumbnail.get('url'), } for _, thumbnail in asset.get('imageVersions', {}).items()] return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnails': thumbnails, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'categories': categories, 'formats': formats, } class TV2ArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tv2\.no/(?:a|\d{4}/\d{2}/\d{2}(/[^/]+)+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tv2.no/2015/05/16/nyheter/alesund/krim/pingvin/6930542', 'info_dict': { 'id': '6930542', 'title': 'Russen hetses etter pingvintyveri - innrømmer å ha åpnet luken på buret', 'description': 'md5:339573779d3eea3542ffe12006190954', }, 'playlist_count': 2, }, { 'url': 'http://www.tv2.no/a/6930542', 'only_matching': True, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) # Old embed pattern (looks unused nowadays) assets = re.findall(r'data-assetid=["\'](\d+)', webpage) if not assets: # New embed pattern for v in re.findall(r'TV2ContentboxVideo\(({.+?})\)', webpage): video = self._parse_json( v, playlist_id, transform_source=js_to_json, fatal=False) if not video: continue asset = video.get('assetId') if asset: assets.append(asset) entries = [ self.url_result('http://www.tv2.no/v/%s' % asset_id, 'TV2') for asset_id in assets] title = remove_end(self._og_search_title(webpage), ' - TV2.no') description = remove_end(self._og_search_description(webpage), ' - TV2.no') return 
self.playlist_result(entries, playlist_id, title, description)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tv2dk.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import extract_attributes class TV2DKIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)? (?: tvsyd| tv2ostjylland| tvmidtvest| tv2fyn| tv2east| tv2lorry| tv2nord )\.dk/ (:[^/]+/)* (?P<id>[^/?\#&]+) ''' _TESTS = [{ 'url': 'https://www.tvsyd.dk/nyheder/28-10-2019/1930/1930-28-okt-2019?autoplay=1#player', 'info_dict': { 'id': '0_52jmwa0p', 'ext': 'mp4', 'title': '19:30 - 28. okt. 2019', 'timestamp': 1572290248, 'upload_date': '20191028', 'uploader_id': 'tvsyd', 'duration': 1347, 'view_count': int, }, 'params': { 'skip_download': True, }, 'add_ie': ['Kaltura'], }, { 'url': 'https://www.tv2ostjylland.dk/artikel/minister-gaar-ind-i-sag-om-diabetes-teknologi', 'only_matching': True, }, { 'url': 'https://www.tv2ostjylland.dk/nyheder/28-10-2019/22/2200-nyhederne-mandag-d-28-oktober-2019?autoplay=1#player', 'only_matching': True, }, { 'url': 'https://www.tvmidtvest.dk/nyheder/27-10-2019/1930/1930-27-okt-2019', 'only_matching': True, }, { 'url': 'https://www.tv2fyn.dk/artikel/fyn-kan-faa-landets-foerste-fabrik-til-groent-jetbraendstof', 'only_matching': True, }, { 'url': 'https://www.tv2east.dk/artikel/gods-faar-indleveret-tonsvis-af-aebler-100-kilo-aebler-gaar-til-en-aeblebrandy', 'only_matching': True, }, { 'url': 'https://www.tv2lorry.dk/koebenhavn/rasmus-paludan-evakueret-til-egen-demonstration#player', 'only_matching': True, }, { 'url': 'https://www.tv2nord.dk/artikel/dybt-uacceptabelt', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) entries = [] for video_el in re.findall(r'(?s)<[^>]+\bdata-entryid\s*=[^>]*>', webpage): video = extract_attributes(video_el) kaltura_id = video.get('data-entryid') if not kaltura_id: continue partner_id = video.get('data-partnerid') if not partner_id: continue entries.append(self.url_result( 'kaltura:%s:%s' % (partner_id, 
kaltura_id), 'Kaltura', video_id=kaltura_id)) return self.playlist_result(entries)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tv2hu.py
# encoding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import int_or_none class TV2HuIE(InfoExtractor): IE_NAME = 'tv2.hu' _VALID_URL = r'https?://(?:www\.)?tv2\.hu/(?:[^/]+/)+(?P<id>\d+)_[^/?#]+?\.html' _TESTS = [{ 'url': 'http://tv2.hu/ezek_megorultek/217679_ezek-megorultek---1.-adas-1.-resz.html', 'md5': '585e58e2e090f34603804bb2c48e98d8', 'info_dict': { 'id': '217679', 'ext': 'mp4', 'title': 'Ezek megőrültek! - 1. adás 1. rész', 'upload_date': '20160826', 'thumbnail': r're:^https?://.*\.jpg$' } }, { 'url': 'http://tv2.hu/ezek_megorultek/teljes_adasok/217677_ezek-megorultek---1.-adas-2.-resz.html', 'only_matching': True }, { 'url': 'http://tv2.hu/musoraink/aktiv/aktiv_teljes_adas/217963_aktiv-teljes-adas---2016.08.30..html', 'only_matching': True }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) json_url = self._search_regex( r'jsonUrl\s*=\s*"([^"]+)"', webpage, 'json url') json_data = self._download_json(json_url, video_id) formats = [] for b in ('bitrates', 'backupBitrates'): bitrates = json_data.get(b, {}) m3u8_url = bitrates.get('hls') if m3u8_url: formats.extend(self._extract_wowza_formats( m3u8_url, video_id, skip_protocols=['rtmp', 'rtsp'])) for mp4_url in bitrates.get('mp4', []): height = int_or_none(self._search_regex( r'\.(\d+)p\.mp4', mp4_url, 'height', default=None)) formats.append({ 'format_id': 'http' + ('-%d' % height if height else ''), 'url': mp4_url, 'height': height, 'width': int_or_none(height / 9.0 * 16.0 if height else None), }) self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage).strip(), 'thumbnail': self._og_search_thumbnail(webpage), 'upload_date': self._search_regex( r'/vod/(\d{8})/', json_url, 'upload_date', default=None), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tv4.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, ) class TV4IE(InfoExtractor): IE_DESC = 'tv4.se and tv4play.se' _VALID_URL = r'''(?x)https?://(?:www\.)? (?: tv4\.se/(?:[^/]+)/klipp/(?:.*)-| tv4play\.se/ (?: (?:program|barn)/(?:[^/]+/|(?:[^\?]+)\?video_id=)| iframe/video/| film/| sport/| ) )(?P<id>[0-9]+)''' _GEO_COUNTRIES = ['SE'] _TESTS = [ { 'url': 'http://www.tv4.se/kalla-fakta/klipp/kalla-fakta-5-english-subtitles-2491650', 'md5': 'cb837212f342d77cec06e6dad190e96d', 'info_dict': { 'id': '2491650', 'ext': 'mp4', 'title': 'Kalla Fakta 5 (english subtitles)', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': int, 'upload_date': '20131125', }, }, { 'url': 'http://www.tv4play.se/iframe/video/3054113', 'md5': 'cb837212f342d77cec06e6dad190e96d', 'info_dict': { 'id': '3054113', 'ext': 'mp4', 'title': 'Så här jobbar ficktjuvarna - se avslöjande bilder', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Unika bilder avslöjar hur turisternas fickor vittjas mitt på Stockholms central. 
Två experter på ficktjuvarna avslöjar knepen du ska se upp för.', 'timestamp': int, 'upload_date': '20150130', }, }, { 'url': 'http://www.tv4play.se/sport/3060959', 'only_matching': True, }, { 'url': 'http://www.tv4play.se/film/2378136', 'only_matching': True, }, { 'url': 'http://www.tv4play.se/barn/looney-tunes?video_id=3062412', 'only_matching': True, }, { 'url': 'http://www.tv4play.se/program/farang/3922081', 'only_matching': True, } ] def _real_extract(self, url): video_id = self._match_id(url) info = self._download_json( 'https://playback-api.b17g.net/asset/%s' % video_id, video_id, 'Downloading video info JSON', query={ 'service': 'tv4', 'device': 'browser', 'protocol': 'hls,dash', 'drm': 'widevine', })['metadata'] title = info['title'] manifest_url = self._download_json( 'https://playback-api.b17g.net/media/' + video_id, video_id, query={ 'service': 'tv4', 'device': 'browser', 'protocol': 'hls', })['playbackItem']['manifestUrl'] formats = self._extract_m3u8_formats( manifest_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(self._extract_mpd_formats( manifest_url.replace('.m3u8', '.mpd'), video_id, mpd_id='dash', fatal=False)) formats.extend(self._extract_f4m_formats( manifest_url.replace('.m3u8', '.f4m'), video_id, f4m_id='hds', fatal=False)) formats.extend(self._extract_ism_formats( re.sub(r'\.ism/.+?\.m3u8', r'.ism/Manifest', manifest_url), video_id, ism_id='mss', fatal=False)) if not formats and info.get('is_geo_restricted'): self.raise_geo_restricted(countries=self._GEO_COUNTRIES) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, # 'subtitles': subtitles, 'description': info.get('description'), 'timestamp': parse_iso8601(info.get('broadcast_date_time')), 'duration': int_or_none(info.get('duration')), 'thumbnail': info.get('image'), 'is_live': info.get('isLive') is True, 'series': info.get('seriesTitle'), 'season_number': int_or_none(info.get('seasonNumber')), 'episode': 
info.get('episodeTitle'), 'episode_number': int_or_none(info.get('episodeNumber')), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tv5mondeplus.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, extract_attributes, get_element_by_class, int_or_none, parse_duration, parse_iso8601, ) class TV5MondePlusIE(InfoExtractor): IE_DESC = 'TV5MONDE+' _VALID_URL = r'https?://(?:www\.)?tv5mondeplus\.com/toutes-les-videos/[^/]+/(?P<id>[^/?#]+)' _TEST = { 'url': 'http://www.tv5mondeplus.com/toutes-les-videos/documentaire/tdah-mon-amour-tele-quebec-tdah-mon-amour-ep001-enfants', 'md5': '12130fc199f020673138a83466542ec6', 'info_dict': { 'id': 'tdah-mon-amour-tele-quebec-tdah-mon-amour-ep001-enfants', 'ext': 'mp4', 'title': 'Tdah, mon amour - Enfants', 'description': 'md5:230e3aca23115afcf8006d1bece6df74', 'upload_date': '20170401', 'timestamp': 1491022860, } } _GEO_BYPASS = False def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) if ">Ce programme n'est malheureusement pas disponible pour votre zone géographique.<" in webpage: self.raise_geo_restricted(countries=['FR']) series = get_element_by_class('video-detail__title', webpage) title = episode = get_element_by_class( 'video-detail__subtitle', webpage) or series if series and series != title: title = '%s - %s' % (series, title) vpl_data = extract_attributes(self._search_regex( r'(<[^>]+class="video_player_loader"[^>]+>)', webpage, 'video player loader')) video_files = self._parse_json( vpl_data['data-broadcast'], display_id).get('files', []) formats = [] for video_file in video_files: v_url = video_file.get('url') if not v_url: continue video_format = video_file.get('format') or determine_ext(v_url) if video_format == 'm3u8': formats.extend(self._extract_m3u8_formats( v_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': v_url, 'format_id': video_format, }) self._sort_formats(formats) return { 'id': display_id, 'display_id': display_id, 'title': title, 'description': 
clean_html(get_element_by_class('video-detail__description', webpage)), 'thumbnail': vpl_data.get('data-image'), 'duration': int_or_none(vpl_data.get('data-duration')) or parse_duration(self._html_search_meta('duration', webpage)), 'timestamp': parse_iso8601(self._html_search_meta('uploadDate', webpage)), 'formats': formats, 'episode': episode, 'series': series, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tva.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( float_or_none, smuggle_url, ) class TVAIE(InfoExtractor): _VALID_URL = r'https?://videos\.tva\.ca/details/_(?P<id>\d+)' _TEST = { 'url': 'https://videos.tva.ca/details/_5596811470001', 'info_dict': { 'id': '5596811470001', 'ext': 'mp4', 'title': 'Un extrait de l\'épisode du dimanche 8 octobre 2017 !', 'uploader_id': '5481942443001', 'upload_date': '20171003', 'timestamp': 1507064617, }, 'params': { # m3u8 download 'skip_download': True, } } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5481942443001/default_default/index.html?videoId=%s' def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'https://videos.tva.ca/proxy/item/_' + video_id, video_id, headers={ 'Accept': 'application/json', }, query={ 'appId': '5955fc5f23eec60006c951f1', }) def get_attribute(key): for attribute in video_data.get('attributes', []): if attribute.get('key') == key: return attribute.get('value') return None return { '_type': 'url_transparent', 'id': video_id, 'title': get_attribute('title'), 'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['CA']}), 'description': get_attribute('description'), 'thumbnail': get_attribute('image-background') or get_attribute('image-landscape'), 'duration': float_or_none(get_attribute('video-duration'), 1000), 'ie_key': 'BrightcoveNew', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvanouvelles.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .brightcove import BrightcoveNewIE class TVANouvellesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/videos/(?P<id>\d+)' _TEST = { 'url': 'http://www.tvanouvelles.ca/videos/5117035533001', 'info_dict': { 'id': '5117035533001', 'ext': 'mp4', 'title': 'L’industrie du taxi dénonce l’entente entre Québec et Uber: explications', 'description': 'md5:479653b7c8cf115747bf5118066bd8b3', 'uploader_id': '1741764581', 'timestamp': 1473352030, 'upload_date': '20160908', }, 'add_ie': ['BrightcoveNew'], } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1741764581/default_default/index.html?videoId=%s' def _real_extract(self, url): brightcove_id = self._match_id(url) return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, BrightcoveNewIE.ie_key(), brightcove_id) class TVANouvellesArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/(?:[^/]+/)+(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://www.tvanouvelles.ca/2016/11/17/des-policiers-qui-ont-la-meche-un-peu-courte', 'info_dict': { 'id': 'des-policiers-qui-ont-la-meche-un-peu-courte', 'title': 'Des policiers qui ont «la mèche un peu courte»?', 'description': 'md5:92d363c8eb0f0f030de9a4a84a90a3a0', }, 'playlist_mincount': 4, } @classmethod def suitable(cls, url): return False if TVANouvellesIE.suitable(url) else super(TVANouvellesArticleIE, cls).suitable(url) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [ self.url_result( 'http://www.tvanouvelles.ca/videos/%s' % mobj.group('id'), ie=TVANouvellesIE.ie_key(), video_id=mobj.group('id')) for mobj in re.finditer( r'data-video-id=(["\'])?(?P<id>\d+)', webpage)] title = self._og_search_title(webpage, fatal=False) description = self._og_search_description(webpage) return self.playlist_result(entries, display_id, title, description)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvc.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, ) class TVCIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvc\.ru/video/iframe/id/(?P<id>\d+)' _TEST = { 'url': 'http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/?acc_video_id=/channel/brand/id/17/show/episodes/episode_id/39702', 'md5': 'bbc5ff531d1e90e856f60fc4b3afd708', 'info_dict': { 'id': '74622', 'ext': 'mp4', 'title': 'События. "События". Эфир от 22.05.2015 14:30', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1122, }, } @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:http:)?//(?:www\.)?tvc\.ru/video/iframe/id/[^"]+)\1', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://www.tvc.ru/video/json/id/%s' % video_id, video_id) formats = [] for info in video.get('path', {}).get('quality', []): video_url = info.get('url') if not video_url: continue format_id = self._search_regex( r'cdnvideo/([^/]+?)(?:-[^/]+?)?/', video_url, 'format id', default=None) formats.append({ 'url': video_url, 'format_id': format_id, 'width': int_or_none(info.get('width')), 'height': int_or_none(info.get('height')), 'tbr': int_or_none(info.get('bitrate')), }) self._sort_formats(formats) return { 'id': video_id, 'title': video['title'], 'thumbnail': video.get('picture'), 'duration': int_or_none(video.get('duration')), 'formats': formats, } class TVCArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvc\.ru/(?!video/iframe/id/)(?P<id>[^?#]+)' _TESTS = [{ 'url': 'http://www.tvc.ru/channel/brand/id/29/show/episodes/episode_id/39702/', 'info_dict': { 'id': '74622', 'ext': 'mp4', 'title': 'События. "События". 
Эфир от 22.05.2015 14:30', 'description': 'md5:ad7aa7db22903f983e687b8a3e98c6dd', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1122, }, }, { 'url': 'http://www.tvc.ru/news/show/id/69944', 'info_dict': { 'id': '75399', 'ext': 'mp4', 'title': 'Эксперты: в столице встал вопрос о максимально безопасных остановках', 'description': 'md5:f2098f71e21f309e89f69b525fd9846e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 278, }, }, { 'url': 'http://www.tvc.ru/channel/brand/id/47/show/episodes#', 'info_dict': { 'id': '2185', 'ext': 'mp4', 'title': 'Ещё не поздно. Эфир от 03.08.2013', 'description': 'md5:51fae9f3f8cfe67abce014e428e5b027', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 3316, }, }] def _real_extract(self, url): webpage = self._download_webpage(url, self._match_id(url)) return { '_type': 'url_transparent', 'ie_key': 'TVC', 'url': self._og_search_video_url(webpage), 'title': clean_html(self._og_search_title(webpage)), 'description': clean_html(self._og_search_description(webpage)), 'thumbnail': self._og_search_thumbnail(webpage), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvigle.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, parse_age_limit, try_get, url_or_none, ) class TvigleIE(InfoExtractor): IE_NAME = 'tvigle' IE_DESC = 'Интернет-телевидение Tvigle.ru' _VALID_URL = r'https?://(?:www\.)?(?:tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$|cloud\.tvigle\.ru/video/(?P<id>\d+))' _GEO_BYPASS = False _GEO_COUNTRIES = ['RU'] _TESTS = [ { 'url': 'http://www.tvigle.ru/video/sokrat/', 'info_dict': { 'id': '1848932', 'display_id': 'sokrat', 'ext': 'mp4', 'title': 'Сократ', 'description': 'md5:d6b92ffb7217b4b8ebad2e7665253c17', 'duration': 6586, 'age_limit': 12, }, 'skip': 'georestricted', }, { 'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/', 'info_dict': { 'id': '5142516', 'ext': 'flv', 'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком', 'description': 'md5:027f7dc872948f14c96d19b4178428a4', 'duration': 186.080, 'age_limit': 0, }, 'skip': 'georestricted', }, { 'url': 'https://cloud.tvigle.ru/video/5267604/', 'only_matching': True, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') if not video_id: webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( (r'<div[^>]+class=["\']player["\'][^>]+id=["\'](\d+)', r'cloudId\s*=\s*["\'](\d+)', r'class="video-preview current_playing" id="(\d+)"'), webpage, 'video id') video_data = self._download_json( 'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id) item = video_data['playlist']['items'][0] videos = item.get('videos') error_message = item.get('errorMessage') if not videos and error_message: if item.get('isGeoBlocked') is True: self.raise_geo_restricted( msg=error_message, countries=self._GEO_COUNTRIES) else: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, 
error_message), expected=True) title = item['title'] description = item.get('description') thumbnail = item.get('thumbnail') duration = float_or_none(item.get('durationMilliseconds'), 1000) age_limit = parse_age_limit(item.get('ageRestrictions')) formats = [] for vcodec, url_or_fmts in item['videos'].items(): if vcodec == 'hls': m3u8_url = url_or_none(url_or_fmts) if not m3u8_url: continue formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif vcodec == 'dash': mpd_url = url_or_none(url_or_fmts) if not mpd_url: continue formats.extend(self._extract_mpd_formats( mpd_url, video_id, mpd_id='dash', fatal=False)) else: if not isinstance(url_or_fmts, dict): continue for format_id, video_url in url_or_fmts.items(): if format_id == 'm3u8': continue video_url = url_or_none(video_url) if not video_url: continue height = self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None) filesize = int_or_none(try_get( item, lambda x: x['video_files_size'][vcodec][format_id])) formats.append({ 'url': video_url, 'format_id': '%s-%s' % (vcodec, format_id), 'vcodec': vcodec, 'height': int_or_none(height), 'filesize': filesize, }) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'age_limit': age_limit, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvland.py
# coding: utf-8 from __future__ import unicode_literals from .spike import ParamountNetworkIE class TVLandIE(ParamountNetworkIE): IE_NAME = 'tvland.com' _VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)' _FEED_URL = 'http://www.tvland.com/feeds/mrss/' _TESTS = [{ # Geo-restricted. Without a proxy metadata are still there. With a # proxy it redirects to http://m.tvland.com/app/ 'url': 'https://www.tvland.com/episodes/s04pzf/everybody-loves-raymond-the-dog-season-1-ep-19', 'info_dict': { 'description': 'md5:84928e7a8ad6649371fbf5da5e1ad75a', 'title': 'The Dog', }, 'playlist_mincount': 5, }, { 'url': 'https://www.tvland.com/video-clips/4n87f2/younger-a-first-look-at-younger-season-6', 'md5': 'e2c6389401cf485df26c79c247b08713', 'info_dict': { 'id': '891f7d3c-5b5b-4753-b879-b7ba1a601757', 'ext': 'mp4', 'title': 'Younger|April 30, 2019|6|NO-EPISODE#|A First Look at Younger Season 6', 'description': 'md5:595ea74578d3a888ae878dfd1c7d4ab2', 'upload_date': '20190430', 'timestamp': 1556658000, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301', 'only_matching': True, }]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvn24.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, NO_DEFAULT, unescapeHTML, ) class TVN24IE(InfoExtractor): _VALID_URL = r'https?://(?:(?:[^/]+)\.)?tvn24(?:bis)?\.pl/(?:[^/]+/)*(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html', 'md5': 'fbdec753d7bc29d96036808275f2130c', 'info_dict': { 'id': '1584444', 'ext': 'mp4', 'title': '"Święta mają być wesołe, dlatego, ludziska, wszyscy pod jemiołę"', 'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości Szkła kontaktowego.', 'thumbnail': 're:https?://.*[.]jpeg', } }, { # different layout 'url': 'https://tvnmeteo.tvn24.pl/magazyny/maja-w-ogrodzie,13/odcinki-online,1,4,1,0/pnacza-ptaki-i-iglaki-odc-691-hgtv-odc-29,1771763.html', 'info_dict': { 'id': '1771763', 'ext': 'mp4', 'title': 'Pnącza, ptaki i iglaki (odc. 691 /HGTV odc. 29)', 'thumbnail': 're:https?://.*', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://fakty.tvn24.pl/ogladaj-online,60/53-konferencja-bezpieczenstwa-w-monachium,716431.html', 'only_matching': True, }, { 'url': 'http://sport.tvn24.pl/pilka-nozna,105/ligue-1-kamil-glik-rozcial-glowe-monaco-tylko-remisuje-z-bastia,716522.html', 'only_matching': True, }, { 'url': 'http://tvn24bis.pl/poranek,146,m/gen-koziej-w-tvn24-bis-wracamy-do-czasow-zimnej-wojny,715660.html', 'only_matching': True, }, { 'url': 'https://www.tvn24.pl/magazyn-tvn24/angie-w-jednej-czwartej-polka-od-szarej-myszki-do-cesarzowej-europy,119,2158', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._og_search_title( webpage, default=None) or self._search_regex( r'<h\d+[^>]+class=["\']magazineItemHeader[^>]+>(.+?)</h', webpage, 'title') def extract_json(attr, name, default=NO_DEFAULT, fatal=True): return self._parse_json( self._search_regex( r'\b%s=(["\'])(?P<json>(?!\1).+?)\1' % attr, 
webpage, name, group='json', default=default, fatal=fatal) or '{}', display_id, transform_source=unescapeHTML, fatal=fatal) quality_data = extract_json('data-quality', 'formats') formats = [] for format_id, url in quality_data.items(): formats.append({ 'url': url, 'format_id': format_id, 'height': int_or_none(format_id.rstrip('p')), }) self._sort_formats(formats) description = self._og_search_description(webpage, default=None) thumbnail = self._og_search_thumbnail( webpage, default=None) or self._html_search_regex( r'\bdata-poster=(["\'])(?P<url>(?!\1).+?)\1', webpage, 'thumbnail', group='url') video_id = None share_params = extract_json( 'data-share-params', 'share params', default=None) if isinstance(share_params, dict): video_id = share_params.get('id') if not video_id: video_id = self._search_regex( r'data-vid-id=["\'](\d+)', webpage, 'video id', default=None) or self._search_regex( r',(\d+)\.html', url, 'video id', default=display_id) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvnet.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, unescapeHTML, url_or_none, ) class TVNetIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+)\.tvnet\.gov\.vn/[^/]+/(?:\d+/)?(?P<id>\d+)(?:/|$)' _TESTS = [{ # video 'url': 'http://de.tvnet.gov.vn/video/109788/vtv1---bac-tuyet-tai-lao-cai-va-ha-giang/tin-nong-24h', 'md5': 'b4d7abe0252c9b47774760b7519c7558', 'info_dict': { 'id': '109788', 'ext': 'mp4', 'title': 'VTV1 - Bắc tuyết tại Lào Cai và Hà Giang', 'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)', 'is_live': False, 'view_count': int, }, }, { # audio 'url': 'http://vn.tvnet.gov.vn/radio/27017/vov1---ban-tin-chieu-10062018/doi-song-va-xa-hoi', 'md5': 'b5875ce9b0a2eecde029216d0e6db2ae', 'info_dict': { 'id': '27017', 'ext': 'm4a', 'title': 'VOV1 - Bản tin chiều (10/06/2018)', 'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)', 'is_live': False, }, }, { 'url': 'http://us.tvnet.gov.vn/video/118023/129999/ngay-0705', 'info_dict': { 'id': '129999', 'ext': 'mp4', 'title': 'VTV1 - Quốc hội với cử tri (11/06/2018)', 'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)', 'is_live': False, }, 'params': { 'skip_download': True, }, }, { # live stream 'url': 'http://us.tvnet.gov.vn/kenh-truyen-hinh/1011/vtv1', 'info_dict': { 'id': '1011', 'ext': 'mp4', 'title': r're:^VTV1 \| LiveTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)', 'is_live': True, }, 'params': { 'skip_download': True, }, }, { # radio live stream 'url': 'http://vn.tvnet.gov.vn/kenh-truyen-hinh/1014', 'info_dict': { 'id': '1014', 'ext': 'm4a', 'title': r're:VOV1 \| LiveTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)', 'is_live': True, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://us.tvnet.gov.vn/phim/6136/25510/vtv3---ca-mot-doi-an-oan-tap-1-50/phim-truyen-hinh', 'only_matching': True, }] def _real_extract(self, url): video_id = 
self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title( webpage, default=None) or self._html_search_meta( 'title', webpage, default=None) or self._search_regex( r'<title>([^<]+)<', webpage, 'title') title = re.sub(r'\s*-\s*TV Net\s*$', '', title) if '/video/' in url or '/radio/' in url: is_live = False elif '/kenh-truyen-hinh/' in url: is_live = True else: is_live = None data_file = unescapeHTML(self._search_regex( r'data-file=(["\'])(?P<url>(?:https?:)?//.+?)\1', webpage, 'data file', group='url')) stream_urls = set() formats = [] for stream in self._download_json(data_file, video_id): if not isinstance(stream, dict): continue stream_url = url_or_none(stream.get('url')) if stream_url in stream_urls or not stream_url: continue stream_urls.add(stream_url) formats.extend(self._extract_m3u8_formats( stream_url, video_id, 'mp4', entry_protocol='m3u8' if is_live else 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats) # better support for radio streams if title.startswith('VOV'): for f in formats: f.update({ 'ext': 'm4a', 'vcodec': 'none', }) thumbnail = self._og_search_thumbnail( webpage, default=None) or unescapeHTML( self._search_regex( r'data-image=(["\'])(?P<url>(?:https?:)?//.+?)\1', webpage, 'thumbnail', default=None, group='url')) if is_live: title = self._live_title(title) view_count = int_or_none(self._search_regex( r'(?s)<div[^>]+\bclass=["\'].*?view-count[^>]+>.*?(\d+).*?</div>', webpage, 'view count', default=None)) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'is_live': is_live, 'view_count': view_count, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvnoe.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, js_to_json, ) class TVNoeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.tvnoe.cz/video/10362', 'md5': 'aee983f279aab96ec45ab6e2abb3c2ca', 'info_dict': { 'id': '10362', 'ext': 'mp4', 'series': 'Noční univerzita', 'title': 'prof. Tomáš Halík, Th.D. - Návrat náboženství a střet civilizací', 'description': 'md5:f337bae384e1a531a52c55ebc50fff41', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) iframe_url = self._search_regex( r'<iframe[^>]+src="([^"]+)"', webpage, 'iframe URL') ifs_page = self._download_webpage(iframe_url, video_id) jwplayer_data = self._find_jwplayer_data( ifs_page, video_id, transform_source=js_to_json) info_dict = self._parse_jwplayer_data( jwplayer_data, video_id, require_title=False, base_url=iframe_url) info_dict.update({ 'id': video_id, 'title': clean_html(get_element_by_class( 'field-name-field-podnazev', webpage)), 'description': clean_html(get_element_by_class( 'field-name-body', webpage)), 'series': clean_html(get_element_by_class('title', webpage)) }) return info_dict
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvnow.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, parse_iso8601, parse_duration, str_or_none, update_url_query, urljoin, ) class TVNowBaseIE(InfoExtractor): _VIDEO_FIELDS = ( 'id', 'title', 'free', 'geoblocked', 'articleLong', 'articleShort', 'broadcastStartDate', 'isDrm', 'duration', 'season', 'episode', 'manifest.dashclear', 'manifest.hlsclear', 'manifest.smoothclear', 'format.title', 'format.defaultImage169Format', 'format.defaultImage169Logo') def _call_api(self, path, video_id, query): return self._download_json( 'https://api.tvnow.de/v3/' + path, video_id, query=query) def _extract_video(self, info, display_id): video_id = compat_str(info['id']) title = info['title'] paths = [] for manifest_url in (info.get('manifest') or {}).values(): if not manifest_url: continue manifest_url = update_url_query(manifest_url, {'filter': ''}) path = self._search_regex(r'https?://[^/]+/(.+?)\.ism/', manifest_url, 'path') if path in paths: continue paths.append(path) def url_repl(proto, suffix): return re.sub( r'(?:hls|dash|hss)([.-])', proto + r'\1', re.sub( r'\.ism/(?:[^.]*\.(?:m3u8|mpd)|[Mm]anifest)', '.ism/' + suffix, manifest_url)) def make_urls(proto, suffix): urls = [url_repl(proto, suffix)] hd_url = urls[0].replace('/manifest/', '/ngvod/') if hd_url != urls[0]: urls.append(hd_url) return urls for man_url in make_urls('dash', '.mpd'): formats = self._extract_mpd_formats( man_url, video_id, mpd_id='dash', fatal=False) for man_url in make_urls('hss', 'Manifest'): formats.extend(self._extract_ism_formats( man_url, video_id, ism_id='mss', fatal=False)) for man_url in make_urls('hls', '.m3u8'): formats.extend(self._extract_m3u8_formats( man_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) if formats: break else: if info.get('isDrm'): raise ExtractorError( 'Video %s is DRM protected' % video_id, expected=True) if 
info.get('geoblocked'): raise self.raise_geo_restricted() if not info.get('free', True): raise ExtractorError( 'Video %s is not available for free' % video_id, expected=True) self._sort_formats(formats) description = info.get('articleLong') or info.get('articleShort') timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ') duration = parse_duration(info.get('duration')) f = info.get('format', {}) thumbnails = [{ 'url': 'https://aistvnow-a.akamaihd.net/tvnow/movie/%s' % video_id, }] thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo') if thumbnail: thumbnails.append({ 'url': thumbnail, }) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'timestamp': timestamp, 'duration': duration, 'series': f.get('title'), 'season_number': int_or_none(info.get('season')), 'episode_number': int_or_none(info.get('episode')), 'episode': title, 'formats': formats, } class TVNowIE(TVNowBaseIE): _VALID_URL = r'''(?x) https?:// (?:www\.)?tvnow\.(?:de|at|ch)/(?P<station>[^/]+)/ (?P<show_id>[^/]+)/ (?!(?:list|jahr)(?:/|$))(?P<id>[^/?\#&]+) ''' @classmethod def suitable(cls, url): return (False if TVNowNewIE.suitable(url) or TVNowSeasonIE.suitable(url) or TVNowAnnualIE.suitable(url) or TVNowShowIE.suitable(url) else super(TVNowIE, cls).suitable(url)) _TESTS = [{ 'url': 'https://www.tvnow.de/rtl2/grip-das-motormagazin/der-neue-porsche-911-gt-3/player', 'info_dict': { 'id': '331082', 'display_id': 'grip-das-motormagazin/der-neue-porsche-911-gt-3', 'ext': 'mp4', 'title': 'Der neue Porsche 911 GT 3', 'description': 'md5:6143220c661f9b0aae73b245e5d898bb', 'timestamp': 1495994400, 'upload_date': '20170528', 'duration': 5283, 'series': 'GRIP - Das Motormagazin', 'season_number': 14, 'episode_number': 405, 'episode': 'Der neue Porsche 911 GT 3', }, }, { # rtl2 'url': 'https://www.tvnow.de/rtl2/armes-deutschland/episode-0008/player', 'only_matching': True, }, { # rtlnitro 'url': 
'https://www.tvnow.de/nitro/alarm-fuer-cobra-11-die-autobahnpolizei/auf-eigene-faust-pilot/player', 'only_matching': True, }, { # superrtl 'url': 'https://www.tvnow.de/superrtl/die-lustigsten-schlamassel-der-welt/u-a-ketchup-effekt/player', 'only_matching': True, }, { # ntv 'url': 'https://www.tvnow.de/ntv/startup-news/goetter-in-weiss/player', 'only_matching': True, }, { # vox 'url': 'https://www.tvnow.de/vox/auto-mobil/neues-vom-automobilmarkt-2017-11-19-17-00-00/player', 'only_matching': True, }, { # rtlplus 'url': 'https://www.tvnow.de/rtlplus/op-ruft-dr-bruckner/die-vernaehte-frau/player', 'only_matching': True, }, { 'url': 'https://www.tvnow.de/rtl2/grip-das-motormagazin/der-neue-porsche-911-gt-3', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = '%s/%s' % mobj.group(2, 3) info = self._call_api( 'movies/' + display_id, display_id, query={ 'fields': ','.join(self._VIDEO_FIELDS), }) return self._extract_video(info, display_id) class TVNowNewIE(InfoExtractor): _VALID_URL = r'''(?x) (?P<base_url>https?:// (?:www\.)?tvnow\.(?:de|at|ch)/ (?:shows|serien))/ (?P<show>[^/]+)-\d+/ [^/]+/ episode-\d+-(?P<episode>[^/?$&]+)-(?P<id>\d+) ''' _TESTS = [{ 'url': 'https://www.tvnow.de/shows/grip-das-motormagazin-1669/2017-05/episode-405-der-neue-porsche-911-gt-3-331082', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) base_url = re.sub(r'(?:shows|serien)', '_', mobj.group('base_url')) show, episode = mobj.group('show', 'episode') return self.url_result( # Rewrite new URLs to the old format and use extraction via old API # at api.tvnow.de as a loophole for bypassing premium content checks '%s/%s/%s' % (base_url, show, episode), ie=TVNowIE.ie_key(), video_id=mobj.group('id')) class TVNowNewBaseIE(InfoExtractor): def _call_api(self, path, video_id, query={}): result = self._download_json( 'https://apigw.tvnow.de/module/' + path, video_id, query=query) error = 
result.get('error') if error: raise ExtractorError( '%s said: %s' % (self.IE_NAME, error), expected=True) return result r""" TODO: new apigw.tvnow.de based version of TVNowIE. Replace old TVNowIE with it when api.tvnow.de is shut down. This version can't bypass premium checks though. class TVNowIE(TVNowNewBaseIE): _VALID_URL = r'''(?x) https?:// (?:www\.)?tvnow\.(?:de|at|ch)/ (?:shows|serien)/[^/]+/ (?:[^/]+/)+ (?P<display_id>[^/?$&]+)-(?P<id>\d+) ''' _TESTS = [{ # episode with annual navigation 'url': 'https://www.tvnow.de/shows/grip-das-motormagazin-1669/2017-05/episode-405-der-neue-porsche-911-gt-3-331082', 'info_dict': { 'id': '331082', 'display_id': 'grip-das-motormagazin/der-neue-porsche-911-gt-3', 'ext': 'mp4', 'title': 'Der neue Porsche 911 GT 3', 'description': 'md5:6143220c661f9b0aae73b245e5d898bb', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1495994400, 'upload_date': '20170528', 'duration': 5283, 'series': 'GRIP - Das Motormagazin', 'season_number': 14, 'episode_number': 405, 'episode': 'Der neue Porsche 911 GT 3', }, }, { # rtl2, episode with season navigation 'url': 'https://www.tvnow.de/shows/armes-deutschland-11471/staffel-3/episode-14-bernd-steht-seit-der-trennung-von-seiner-frau-allein-da-526124', 'only_matching': True, }, { # rtlnitro 'url': 'https://www.tvnow.de/serien/alarm-fuer-cobra-11-die-autobahnpolizei-1815/staffel-13/episode-5-auf-eigene-faust-pilot-366822', 'only_matching': True, }, { # superrtl 'url': 'https://www.tvnow.de/shows/die-lustigsten-schlamassel-der-welt-1221/staffel-2/episode-14-u-a-ketchup-effekt-364120', 'only_matching': True, }, { # ntv 'url': 'https://www.tvnow.de/shows/startup-news-10674/staffel-2/episode-39-goetter-in-weiss-387630', 'only_matching': True, }, { # vox 'url': 'https://www.tvnow.de/shows/auto-mobil-174/2017-11/episode-46-neues-vom-automobilmarkt-2017-11-19-17-00-00-380072', 'only_matching': True, }, { 'url': 
'https://www.tvnow.de/shows/grip-das-motormagazin-1669/2017-05/episode-405-der-neue-porsche-911-gt-3-331082', 'only_matching': True, }] def _extract_video(self, info, url, display_id): config = info['config'] source = config['source'] video_id = compat_str(info.get('id') or source['videoId']) title = source['title'].strip() paths = [] for manifest_url in (info.get('manifest') or {}).values(): if not manifest_url: continue manifest_url = update_url_query(manifest_url, {'filter': ''}) path = self._search_regex(r'https?://[^/]+/(.+?)\.ism/', manifest_url, 'path') if path in paths: continue paths.append(path) def url_repl(proto, suffix): return re.sub( r'(?:hls|dash|hss)([.-])', proto + r'\1', re.sub( r'\.ism/(?:[^.]*\.(?:m3u8|mpd)|[Mm]anifest)', '.ism/' + suffix, manifest_url)) formats = self._extract_mpd_formats( url_repl('dash', '.mpd'), video_id, mpd_id='dash', fatal=False) formats.extend(self._extract_ism_formats( url_repl('hss', 'Manifest'), video_id, ism_id='mss', fatal=False)) formats.extend(self._extract_m3u8_formats( url_repl('hls', '.m3u8'), video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) if formats: break else: if try_get(info, lambda x: x['rights']['isDrm']): raise ExtractorError( 'Video %s is DRM protected' % video_id, expected=True) if try_get(config, lambda x: x['boards']['geoBlocking']['block']): raise self.raise_geo_restricted() if not info.get('free', True): raise ExtractorError( 'Video %s is not available for free' % video_id, expected=True) self._sort_formats(formats) description = source.get('description') thumbnail = url_or_none(source.get('poster')) timestamp = unified_timestamp(source.get('previewStart')) duration = parse_duration(source.get('length')) series = source.get('format') season_number = int_or_none(self._search_regex( r'staffel-(\d+)', url, 'season number', default=None)) episode_number = int_or_none(self._search_regex( r'episode-(\d+)', url, 'episode number', default=None)) return { 'id': video_id, 'display_id': 
display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'series': series, 'season_number': season_number, 'episode_number': episode_number, 'episode': title, 'formats': formats, } def _real_extract(self, url): display_id, video_id = re.match(self._VALID_URL, url).groups() info = self._call_api('player/' + video_id, video_id) return self._extract_video(info, video_id, display_id) """ class TVNowListBaseIE(TVNowNewBaseIE): _SHOW_VALID_URL = r'''(?x) (?P<base_url> https?:// (?:www\.)?tvnow\.(?:de|at|ch)/(?:shows|serien)/ [^/?#&]+-(?P<show_id>\d+) ) ''' @classmethod def suitable(cls, url): return (False if TVNowNewIE.suitable(url) else super(TVNowListBaseIE, cls).suitable(url)) def _extract_items(self, url, show_id, list_id, query): items = self._call_api( 'teaserrow/format/episode/' + show_id, list_id, query=query)['items'] entries = [] for item in items: if not isinstance(item, dict): continue item_url = urljoin(url, item.get('url')) if not item_url: continue video_id = str_or_none(item.get('id') or item.get('videoId')) item_title = item.get('subheadline') or item.get('text') entries.append(self.url_result( item_url, ie=TVNowNewIE.ie_key(), video_id=video_id, video_title=item_title)) return self.playlist_result(entries, '%s/%s' % (show_id, list_id)) class TVNowSeasonIE(TVNowListBaseIE): _VALID_URL = r'%s/staffel-(?P<id>\d+)' % TVNowListBaseIE._SHOW_VALID_URL _TESTS = [{ 'url': 'https://www.tvnow.de/serien/alarm-fuer-cobra-11-die-autobahnpolizei-1815/staffel-13', 'info_dict': { 'id': '1815/13', }, 'playlist_mincount': 22, }] def _real_extract(self, url): _, show_id, season_id = re.match(self._VALID_URL, url).groups() return self._extract_items( url, show_id, season_id, {'season': season_id}) class TVNowAnnualIE(TVNowListBaseIE): _VALID_URL = r'%s/(?P<year>\d{4})-(?P<month>\d{2})' % TVNowListBaseIE._SHOW_VALID_URL _TESTS = [{ 'url': 'https://www.tvnow.de/shows/grip-das-motormagazin-1669/2017-05', 
'info_dict': { 'id': '1669/2017-05', }, 'playlist_mincount': 2, }] def _real_extract(self, url): _, show_id, year, month = re.match(self._VALID_URL, url).groups() return self._extract_items( url, show_id, '%s-%s' % (year, month), { 'year': int(year), 'month': int(month), }) class TVNowShowIE(TVNowListBaseIE): _VALID_URL = TVNowListBaseIE._SHOW_VALID_URL _TESTS = [{ # annual navigationType 'url': 'https://www.tvnow.de/shows/grip-das-motormagazin-1669', 'info_dict': { 'id': '1669', }, 'playlist_mincount': 73, }, { # season navigationType 'url': 'https://www.tvnow.de/shows/armes-deutschland-11471', 'info_dict': { 'id': '11471', }, 'playlist_mincount': 3, }] @classmethod def suitable(cls, url): return (False if TVNowNewIE.suitable(url) or TVNowSeasonIE.suitable(url) or TVNowAnnualIE.suitable(url) else super(TVNowShowIE, cls).suitable(url)) def _real_extract(self, url): base_url, show_id = re.match(self._VALID_URL, url).groups() result = self._call_api( 'teaserrow/format/navigation/' + show_id, show_id) items = result['items'] entries = [] navigation = result.get('navigationType') if navigation == 'annual': for item in items: if not isinstance(item, dict): continue year = int_or_none(item.get('year')) if year is None: continue months = item.get('months') if not isinstance(months, list): continue for month_dict in months: if not isinstance(month_dict, dict) or not month_dict: continue month_number = int_or_none(list(month_dict.keys())[0]) if month_number is None: continue entries.append(self.url_result( '%s/%04d-%02d' % (base_url, year, month_number), ie=TVNowAnnualIE.ie_key())) elif navigation == 'season': for item in items: if not isinstance(item, dict): continue season_number = int_or_none(item.get('season')) if season_number is None: continue entries.append(self.url_result( '%s/staffel-%d' % (base_url, season_number), ie=TVNowSeasonIE.ie_key())) else: raise ExtractorError('Unknown navigationType') return self.playlist_result(entries, show_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvp.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, ExtractorError, get_element_by_attribute, orderedSet, ) class TVPIE(InfoExtractor): IE_NAME = 'tvp' IE_DESC = 'Telewizja Polska' _VALID_URL = r'https?://[^/]+\.tvp\.(?:pl|info)/(?:video/(?:[^,\s]*,)*|(?:(?!\d+/)[^/]+/)*)(?P<id>\d+)' _TESTS = [{ 'url': 'https://vod.tvp.pl/video/czas-honoru,i-seria-odc-13,194536', 'md5': 'a21eb0aa862f25414430f15fdfb9e76c', 'info_dict': { 'id': '194536', 'ext': 'mp4', 'title': 'Czas honoru, odc. 13 – Władek', 'description': 'md5:437f48b93558370b031740546b696e24', }, }, { 'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176', 'md5': 'b0005b542e5b4de643a9690326ab1257', 'info_dict': { 'id': '17916176', 'ext': 'mp4', 'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata', 'description': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata', }, }, { # page id is not the same as video id(#7799) 'url': 'https://wiadomosci.tvp.pl/33908820/28092017-1930', 'md5': '84cd3c8aec4840046e5ab712416b73d0', 'info_dict': { 'id': '33908820', 'ext': 'mp4', 'title': 'Wiadomości, 28.09.2017, 19:30', 'description': 'Wydanie główne codziennego serwisu informacyjnego.' 
}, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272', 'only_matching': True, }, { 'url': 'http://wiadomosci.tvp.pl/25169746/24052016-1200', 'only_matching': True, }, { 'url': 'http://krakow.tvp.pl/25511623/25lecie-mck-wyjatkowe-miejsce-na-mapie-krakowa', 'only_matching': True, }, { 'url': 'http://teleexpress.tvp.pl/25522307/wierni-wzieli-udzial-w-procesjach', 'only_matching': True, }, { 'url': 'http://sport.tvp.pl/25522165/krychowiak-uspokaja-w-sprawie-kontuzji-dwa-tygodnie-to-maksimum', 'only_matching': True, }, { 'url': 'http://www.tvp.info/25511919/trwa-rewolucja-wladza-zdecydowala-sie-na-pogwalcenie-konstytucji', 'only_matching': True, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) video_id = self._search_regex([ r'<iframe[^>]+src="[^"]*?object_id=(\d+)', r"object_id\s*:\s*'(\d+)'", r'data-video-id="(\d+)"'], webpage, 'video id', default=page_id) return { '_type': 'url_transparent', 'url': 'tvp:' + video_id, 'description': self._og_search_description( webpage, default=None) or self._html_search_meta( 'description', webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'ie_key': 'TVPEmbed', } class TVPEmbedIE(InfoExtractor): IE_NAME = 'tvp:embed' IE_DESC = 'Telewizja Polska' _VALID_URL = r'(?:tvp:|https?://[^/]+\.tvp\.(?:pl|info)/sess/tvplayer\.php\?.*?object_id=)(?P<id>\d+)' _TESTS = [{ 'url': 'tvp:194536', 'md5': 'a21eb0aa862f25414430f15fdfb9e76c', 'info_dict': { 'id': '194536', 'ext': 'mp4', 'title': 'Czas honoru, odc. 
13 – Władek', }, }, { # not available 'url': 'http://www.tvp.pl/sess/tvplayer.php?object_id=22670268', 'md5': '8c9cd59d16edabf39331f93bf8a766c7', 'info_dict': { 'id': '22670268', 'ext': 'mp4', 'title': 'Panorama, 07.12.2015, 15:40', }, 'skip': 'Transmisja została zakończona lub materiał niedostępny', }, { 'url': 'tvp:22670268', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id) error = self._html_search_regex( r'(?s)<p[^>]+\bclass=["\']notAvailable__text["\'][^>]*>(.+?)</p>', webpage, 'error', default=None) or clean_html( get_element_by_attribute('class', 'msg error', webpage)) if error: raise ExtractorError('%s said: %s' % ( self.IE_NAME, clean_html(error)), expected=True) title = self._search_regex( r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1', webpage, 'title', group='title') series_title = self._search_regex( r'name\s*:\s*([\'"])SeriesTitle\1\s*,\s*value\s*:\s*\1(?P<series>.+?)\1', webpage, 'series', group='series', default=None) if series_title: title = '%s, %s' % (series_title, title) thumbnail = self._search_regex( r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None) video_url = self._search_regex( r'0:{src:([\'"])(?P<url>.*?)\1', webpage, 'formats', group='url', default=None) if not video_url or 'material_niedostepny.mp4' in video_url: video_url = self._download_json( 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id, video_id)['video_url'] formats = [] video_url_base = self._search_regex( r'(https?://.+?/video)(?:\.(?:ism|f4m|m3u8)|-\d+\.mp4)', video_url, 'video base url', default=None) if video_url_base: # TODO: <Group> found instead of <AdaptationSet> in MPD manifest. # It's not mentioned in MPEG-DASH standard. Figure that out. 
# formats.extend(self._extract_mpd_formats( # video_url_base + '.ism/video.mpd', # video_id, mpd_id='dash', fatal=False)) formats.extend(self._extract_ism_formats( video_url_base + '.ism/Manifest', video_id, 'mss', fatal=False)) formats.extend(self._extract_f4m_formats( video_url_base + '.ism/video.f4m', video_id, f4m_id='hds', fatal=False)) m3u8_formats = self._extract_m3u8_formats( video_url_base + '.ism/video.m3u8', video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) self._sort_formats(m3u8_formats) m3u8_formats = list(filter( lambda f: f.get('vcodec') != 'none', m3u8_formats)) formats.extend(m3u8_formats) for i, m3u8_format in enumerate(m3u8_formats, 2): http_url = '%s-%d.mp4' % (video_url_base, i) if self._is_valid_url(http_url, video_id): f = m3u8_format.copy() f.update({ 'url': http_url, 'format_id': f['format_id'].replace('hls', 'http'), 'protocol': 'http', }) formats.append(f) else: formats = [{ 'format_id': 'direct', 'url': video_url, 'ext': determine_ext(video_url, 'mp4'), }] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, } class TVPWebsiteIE(InfoExtractor): IE_NAME = 'tvp:series' _VALID_URL = r'https?://vod\.tvp\.pl/website/(?P<display_id>[^,]+),(?P<id>\d+)' _TESTS = [{ # series 'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312/video', 'info_dict': { 'id': '38678312', }, 'playlist_count': 115, }, { # film 'url': 'https://vod.tvp.pl/website/gloria,35139666', 'info_dict': { 'id': '36637049', 'ext': 'mp4', 'title': 'Gloria, Gloria', }, 'params': { 'skip_download': True, }, 'add_ie': ['TVPEmbed'], }, { 'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312', 'only_matching': True, }] def _entries(self, display_id, playlist_id): url = 'https://vod.tvp.pl/website/%s,%s/video' % (display_id, playlist_id) for page_num in itertools.count(1): page = self._download_webpage( url, display_id, 'Downloading page %d' % page_num, query={'page': page_num}) video_ids = orderedSet(re.findall( 
r'<a[^>]+\bhref=["\']/video/%s,[^,]+,(\d+)' % display_id, page)) if not video_ids: break for video_id in video_ids: yield self.url_result( 'tvp:%s' % video_id, ie=TVPEmbedIE.ie_key(), video_id=video_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id, playlist_id = mobj.group('display_id', 'id') return self.playlist_result( self._entries(display_id, playlist_id), playlist_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvplay.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, compat_urlparse, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, parse_iso8601, qualities, smuggle_url, try_get, unsmuggle_url, update_url_query, url_or_none, ) class TVPlayIE(InfoExtractor): IE_NAME = 'mtg' IE_DESC = 'MTG services' _VALID_URL = r'''(?x) (?: mtg:| https?:// (?:www\.)? (?: tvplay(?:\.skaties)?\.lv(?:/parraides)?| (?:tv3play|play\.tv3)\.lt(?:/programos)?| tv3play(?:\.tv3)?\.ee/sisu| (?:tv(?:3|6|8|10)play|viafree)\.se/program| (?:(?:tv3play|viasat4play|tv6play|viafree)\.no|(?:tv3play|viafree)\.dk)/programmer| play\.nova(?:tv)?\.bg/programi ) /(?:[^/]+/)+ ) (?P<id>\d+) ''' _TESTS = [ { 'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true', 'md5': 'a1612fe0849455423ad8718fe049be21', 'info_dict': { 'id': '418113', 'ext': 'mp4', 'title': 'Kādi ir īri? - Viņas melo labāk', 'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.', 'series': 'Viņas melo labāk', 'season': '2.sezona', 'season_number': 2, 'duration': 25, 'timestamp': 1406097056, 'upload_date': '20140723', }, }, { 'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true', 'info_dict': { 'id': '409229', 'ext': 'flv', 'title': 'Moterys meluoja geriau', 'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e', 'series': 'Moterys meluoja geriau', 'episode_number': 47, 'season': '1 sezonas', 'season_number': 1, 'duration': 1330, 'timestamp': 1403769181, 'upload_date': '20140626', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true', 'info_dict': { 'id': '238551', 'ext': 'flv', 'title': 'Kodu keset linna 398537', 'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701', 'duration': 1257, 'timestamp': 1292449761, 'upload_date': '20101215', }, 'params': { # rtmp download 'skip_download': True, }, }, 
{ 'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true', 'info_dict': { 'id': '395385', 'ext': 'mp4', 'title': 'Husräddarna S02E07', 'description': 'md5:f210c6c89f42d4fc39faa551be813777', 'duration': 2574, 'timestamp': 1400596321, 'upload_date': '20140520', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true', 'info_dict': { 'id': '266636', 'ext': 'mp4', 'title': 'Den sista dokusåpan S01E08', 'description': 'md5:295be39c872520221b933830f660b110', 'duration': 1492, 'timestamp': 1330522854, 'upload_date': '20120229', 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true', 'info_dict': { 'id': '282756', 'ext': 'mp4', 'title': 'Antikjakten S01E10', 'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8', 'duration': 2646, 'timestamp': 1348575868, 'upload_date': '20120925', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true', 'info_dict': { 'id': '230898', 'ext': 'mp4', 'title': 'Anna Anka søker assistent - Ep. 8', 'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474', 'duration': 2656, 'timestamp': 1277720005, 'upload_date': '20100628', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true', 'info_dict': { 'id': '21873', 'ext': 'mp4', 'title': 'Budbringerne program 10', 'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d', 'duration': 1297, 'timestamp': 1254205102, 'upload_date': '20090929', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true', 'info_dict': { 'id': '361883', 'ext': 'mp4', 'title': 'Hotelinspektør Alex Polizzi - Ep. 
10', 'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81', 'duration': 2594, 'timestamp': 1393236292, 'upload_date': '20140224', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://play.novatv.bg/programi/zdravei-bulgariya/624952?autostart=true', 'info_dict': { 'id': '624952', 'ext': 'flv', 'title': 'Здравей, България (12.06.2015 г.) ', 'description': 'md5:99f3700451ac5bb71a260268b8daefd7', 'duration': 8838, 'timestamp': 1434100372, 'upload_date': '20150612', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'https://play.nova.bg/programi/zdravei-bulgariya/764300?autostart=true', 'only_matching': True, }, { 'url': 'http://tvplay.skaties.lv/parraides/vinas-melo-labak/418113?autostart=true', 'only_matching': True, }, { 'url': 'https://tvplay.skaties.lv/vinas-melo-labak/418113/?autostart=true', 'only_matching': True, }, { # views is null 'url': 'http://tvplay.skaties.lv/parraides/tv3-zinas/760183', 'only_matching': True, }, { 'url': 'http://tv3play.tv3.ee/sisu/kodu-keset-linna/238551?autostart=true', 'only_matching': True, }, { 'url': 'http://www.viafree.se/program/underhallning/i-like-radio-live/sasong-1/676869', 'only_matching': True, }, { 'url': 'mtg:418113', 'only_matching': True, } ] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) video_id = self._match_id(url) geo_country = self._search_regex( r'https?://[^/]+\.([a-z]{2})', url, 'geo country', default=None) if geo_country: self._initialize_geo_bypass({'countries': [geo_country.upper()]}) video = self._download_json( 'http://playapi.mtgx.tv/v3/videos/%s' % video_id, video_id, 'Downloading video JSON') title = video['title'] try: streams = self._download_json( 'http://playapi.mtgx.tv/v3/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON') except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: msg = 
self._parse_json(e.cause.read().decode('utf-8'), video_id) raise ExtractorError(msg['msg'], expected=True) raise quality = qualities(['hls', 'medium', 'high']) formats = [] for format_id, video_url in streams.get('streams', {}).items(): video_url = url_or_none(video_url) if not video_url: continue ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( update_url_query(video_url, { 'hdcore': '3.5.0', 'plugin': 'aasp-3.5.0.151.81' }), video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: fmt = { 'format_id': format_id, 'quality': quality(format_id), 'ext': ext, } if video_url.startswith('rtmp'): if smuggled_data.get('skip_rtmp'): continue m = re.search( r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url) if not m: continue fmt.update({ 'ext': 'flv', 'url': m.group('url'), 'app': m.group('app'), 'play_path': m.group('playpath'), 'preference': -1, }) else: fmt.update({ 'url': video_url, }) formats.append(fmt) if not formats and video.get('is_geo_blocked'): self.raise_geo_restricted( 'This content might not be available in your country due to copyright reasons') self._sort_formats(formats) # TODO: webvtt in m3u8 subtitles = {} sami_path = video.get('sami_path') if sami_path: lang = self._search_regex( r'_([a-z]{2})\.xml', sami_path, 'lang', default=compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1]) subtitles[lang] = [{ 'url': sami_path, }] series = video.get('format_title') episode_number = int_or_none(video.get('format_position', {}).get('episode')) season = video.get('_embedded', {}).get('season', {}).get('title') season_number = int_or_none(video.get('format_position', {}).get('season')) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'series': series, 'episode_number': episode_number, 'season': season, 'season_number': season_number, 'duration': 
int_or_none(video.get('duration')), 'timestamp': parse_iso8601(video.get('created_at')), 'view_count': try_get(video, lambda x: x['views']['total'], int), 'age_limit': int_or_none(video.get('age_limit', 0)), 'formats': formats, 'subtitles': subtitles, } class ViafreeIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)? viafree\. (?: (?:dk|no)/programmer| se/program ) /(?:[^/]+/)+(?P<id>[^/?#&]+) ''' _TESTS = [{ 'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2', 'info_dict': { 'id': '395375', 'ext': 'mp4', 'title': 'Husräddarna S02E02', 'description': 'md5:4db5c933e37db629b5a2f75dfb34829e', 'series': 'Husräddarna', 'season': 'Säsong 2', 'season_number': 2, 'duration': 2576, 'timestamp': 1400596321, 'upload_date': '20140520', }, 'params': { 'skip_download': True, }, 'add_ie': [TVPlayIE.ie_key()], }, { # with relatedClips 'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-1', 'info_dict': { 'id': '758770', 'ext': 'mp4', 'title': 'Sommaren med YouTube-stjärnorna S01E01', 'description': 'md5:2bc69dce2c4bb48391e858539bbb0e3f', 'series': 'Sommaren med YouTube-stjärnorna', 'season': 'Säsong 1', 'season_number': 1, 'duration': 1326, 'timestamp': 1470905572, 'upload_date': '20160811', }, 'params': { 'skip_download': True, }, 'add_ie': [TVPlayIE.ie_key()], }, { # Different og:image URL schema 'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-2', 'only_matching': True, }, { 'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1', 'only_matching': True, }, { 'url': 'http://www.viafree.dk/programmer/reality/paradise-hotel/saeson-7/episode-5', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if TVPlayIE.suitable(url) else super(ViafreeIE, cls).suitable(url) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._parse_json( 
self._search_regex( r'(?s)window\.App\s*=\s*({.+?})\s*;\s*</script', webpage, 'data', default='{}'), video_id, transform_source=lambda x: re.sub( r'(?s)function\s+[a-zA-Z_][\da-zA-Z_]*\s*\([^)]*\)\s*{[^}]*}\s*', 'null', x), fatal=False) video_id = None if data: video_id = try_get( data, lambda x: x['context']['dispatcher']['stores'][ 'ContentPageProgramStore']['currentVideo']['id'], compat_str) # Fallback #1 (extract from og:image URL schema) if not video_id: thumbnail = self._og_search_thumbnail(webpage, default=None) if thumbnail: video_id = self._search_regex( # Patterns seen: # http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/inbox/765166/a2e95e5f1d735bab9f309fa345cc3f25.jpg # http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/seasons/15204/758770/4a5ba509ca8bc043e1ebd1a76131cdf2.jpg r'https?://[^/]+/imagecache/(?:[^/]+/)+(\d{6,})/', thumbnail, 'video id', default=None) # Fallback #2. Extract from raw JSON string. # May extract wrong video id if relatedClips is present. if not video_id: video_id = self._search_regex( r'currentVideo["\']\s*:\s*.+?["\']id["\']\s*:\s*["\'](\d{6,})', webpage, 'video id') return self.url_result( smuggle_url( 'mtg:%s' % video_id, { 'geo_countries': [ compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1]], # rtmp host mtgfs.fplive.net for viafree is unresolvable 'skip_rtmp': True, }), ie=TVPlayIE.ie_key(), video_id=video_id) class TVPlayHomeIE(InfoExtractor): _VALID_URL = r'https?://tvplay\.(?:tv3\.lt|skaties\.lv|tv3\.ee)/[^/]+/[^/?#&]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://tvplay.tv3.lt/aferistai-n-7/aferistai-10047125/', 'info_dict': { 'id': '366367', 'ext': 'mp4', 'title': 'Aferistai', 'description': 'Aferistai. 
Kalėdinė pasaka.', 'series': 'Aferistai [N-7]', 'season': '1 sezonas', 'season_number': 1, 'duration': 464, 'timestamp': 1394209658, 'upload_date': '20140307', 'age_limit': 18, }, 'params': { 'skip_download': True, }, 'add_ie': [TVPlayIE.ie_key()], }, { 'url': 'https://tvplay.skaties.lv/vinas-melo-labak/vinas-melo-labak-10280317/', 'only_matching': True, }, { 'url': 'https://tvplay.tv3.ee/cool-d-ga-mehhikosse/cool-d-ga-mehhikosse-10044354/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_id = self._search_regex( r'data-asset-id\s*=\s*["\'](\d{5,})\b', webpage, 'video id') if len(video_id) < 8: return self.url_result( 'mtg:%s' % video_id, ie=TVPlayIE.ie_key(), video_id=video_id) m3u8_url = self._search_regex( r'data-file\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'm3u8 url', group='url') formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') self._sort_formats(formats) title = self._search_regex( r'data-title\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'title', default=None, group='value') or self._html_search_meta( 'title', webpage, default=None) or self._og_search_title( webpage) description = self._html_search_meta( 'description', webpage, default=None) or self._og_search_description(webpage) thumbnail = self._search_regex( r'data-image\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'thumbnail', default=None, group='url') or self._html_search_meta( 'thumbnail', webpage, default=None) or self._og_search_thumbnail( webpage) duration = int_or_none(self._search_regex( r'data-duration\s*=\s*["\'](\d+)', webpage, 'duration', fatal=False)) season = self._search_regex( (r'data-series-title\s*=\s*(["\'])[^/]+/(?P<value>(?:(?!\1).)+)\1', r'\bseason\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage, 'season', default=None, group='value') season_number = int_or_none(self._search_regex( 
r'(\d+)(?:[.\s]+sezona|\s+HOOAEG)', season or '', 'season number', default=None)) episode = self._search_regex( (r'\bepisode\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', r'data-subtitle\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage, 'episode', default=None, group='value') episode_number = int_or_none(self._search_regex( r'(?:S[eē]rija|Osa)\s+(\d+)', episode or '', 'episode number', default=None)) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'season': season, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tvplayer.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    extract_attributes,
    try_get,
    urlencode_postdata,
    ExtractorError,
)


class TVPlayerIE(InfoExtractor):
    """Extractor for live channel streams on tvplayer.com."""
    _VALID_URL = r'https?://(?:www\.)?tvplayer\.com/watch/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://tvplayer.com/watch/bbcone',
        'info_dict': {
            'id': '89',
            'ext': 'mp4',
            'title': r're:^BBC One [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        # The channel currently being watched is described by data-*
        # attributes on the div carrying the "current-channel" class.
        channel_attrs = extract_attributes(self._search_regex(
            r'(<div[^>]+class="[^"]*current-channel[^"]*"[^>]*>)',
            page, 'channel element'))
        title = channel_attrs['data-name']
        resource_id = channel_attrs['data-id']

        token = self._search_regex(
            r'data-token=(["\'])(?P<token>(?!\1).+)\1', page,
            'token', group='token')

        # Exchange the page token for a stream validation token.
        context = self._download_json(
            'https://tvplayer.com/watch/context', display_id,
            'Downloading JSON context', query={
                'resource': resource_id,
                'gen': token,
            })

        validate = context['validate']
        platform = try_get(
            context, lambda x: x['platform']['key'], compat_str) or 'firefox'

        try:
            stream_response = self._download_json(
                'http://api.tvplayer.com/api/v2/stream/live',
                display_id, 'Downloading JSON stream', headers={
                    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                }, data=urlencode_postdata({
                    'id': resource_id,
                    'service': 1,
                    'platform': platform,
                    'validate': validate,
                }))['tvplayer']['response']
        except ExtractorError as e:
            # The API reports errors with an HTTP error status whose body
            # is still well-formed JSON; surface its message to the user.
            if isinstance(e.cause, compat_HTTPError):
                stream_response = self._parse_json(
                    e.cause.read().decode(),
                    resource_id)['tvplayer']['response']
                raise ExtractorError(
                    '%s said: %s' % (self.IE_NAME, stream_response['error']),
                    expected=True)
            raise

        formats = self._extract_m3u8_formats(
            stream_response['stream'], display_id, 'mp4')
        self._sort_formats(formats)

        return {
            'id': resource_id,
            'display_id': display_id,
            'title': self._live_title(title),
            'formats': formats,
            'is_live': True,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tweakers.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    determine_ext,
    mimetype2ext,
)


class TweakersIE(InfoExtractor):
    """Extractor for videos hosted on tweakers.net."""
    _VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)'
    _TEST = {
        'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html',
        'md5': 'fe73e417c093a788e0160c4025f88b15',
        'info_dict': {
            'id': '9926',
            'ext': 'mp4',
            'title': 'New Nintendo 3DS XL - Op alle fronten beter',
            'description': 'md5:3789b21fed9c0219e9bcaacd43fab280',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'duration': 386,
            'uploader_id': 's7JeEm',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The playlist endpoint carries all metadata for a single video.
        item = self._download_json(
            'https://tweakers.net/video/s1playlist/%s/1920/1080/playlist.json' % video_id,
            video_id)['items'][0]

        title = item['title']

        formats = []
        for loc in item.get('locations', {}).get('progressive', []):
            # Each progressive "location" describes one rendition, which
            # may expose several source URLs.
            fmt_id = loc.get('label')
            fmt_width = int_or_none(loc.get('width'))
            fmt_height = int_or_none(loc.get('height'))
            for src in loc.get('sources', []):
                src_url = src.get('src')
                if not src_url:
                    continue
                formats.append({
                    'format_id': fmt_id,
                    'url': src_url,
                    'width': fmt_width,
                    'height': fmt_height,
                    'ext': mimetype2ext(src.get('type')) or determine_ext(src_url),
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': item.get('description'),
            'thumbnail': item.get('poster'),
            'duration': int_or_none(item.get('duration')),
            'uploader_id': item.get('account'),
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/twentyfourvideo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_iso8601, int_or_none, xpath_attr, xpath_element, ) class TwentyFourVideoIE(InfoExtractor): IE_NAME = '24video' _VALID_URL = r'''(?x) https?:// (?P<host> (?:(?:www|porno)\.)?24video\. (?:net|me|xxx|sexy?|tube|adult|site) )/ (?: video/(?:(?:view|xml)/)?| player/new24_play\.swf\?id= ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://www.24video.net/video/view/1044982', 'md5': 'e09fc0901d9eaeedac872f154931deeb', 'info_dict': { 'id': '1044982', 'ext': 'mp4', 'title': 'Эротика каменного века', 'description': 'Как смотрели порно в каменном веке.', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'SUPERTELO', 'duration': 31, 'timestamp': 1275937857, 'upload_date': '20100607', 'age_limit': 18, 'like_count': int, 'dislike_count': int, }, }, { 'url': 'http://www.24video.net/player/new24_play.swf?id=1044982', 'only_matching': True, }, { 'url': 'http://www.24video.me/video/view/1044982', 'only_matching': True, }, { 'url': 'http://www.24video.tube/video/view/2363750', 'only_matching': True, }, { 'url': 'https://www.24video.site/video/view/2640421', 'only_matching': True, }, { 'url': 'https://porno.24video.net/video/2640421-vsya-takaya-gibkaya-i-v-masle', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') host = mobj.group('host') webpage = self._download_webpage( 'http://%s/video/view/%s' % (host, video_id), video_id) title = self._og_search_title(webpage) description = self._html_search_regex( r'<(p|span)[^>]+itemprop="description"[^>]*>(?P<description>[^<]+)</\1>', webpage, 'description', fatal=False, group='description') thumbnail = self._og_search_thumbnail(webpage) duration = int_or_none(self._og_search_property( 'duration', webpage, 'duration', fatal=False)) timestamp = parse_iso8601(self._search_regex( r'<time[^>]+\bdatetime="([^"]+)"[^>]+itemprop="uploadDate"', webpage, 'upload 
date', fatal=False)) uploader = self._html_search_regex( r'class="video-uploaded"[^>]*>\s*<a href="/jsecUser/movies/[^"]+"[^>]*>([^<]+)</a>', webpage, 'uploader', fatal=False) view_count = int_or_none(self._html_search_regex( r'<span class="video-views">(\d+) просмотр', webpage, 'view count', fatal=False)) comment_count = int_or_none(self._html_search_regex( r'<a[^>]+href="#tab-comments"[^>]*>(\d+) комментари', webpage, 'comment count', default=None)) # Sets some cookies self._download_xml( r'http://%s/video/xml/%s?mode=init' % (host, video_id), video_id, 'Downloading init XML') video_xml = self._download_xml( 'http://%s/video/xml/%s?mode=play' % (host, video_id), video_id, 'Downloading video XML') video = xpath_element(video_xml, './/video', 'video', fatal=True) formats = [{ 'url': xpath_attr(video, '', 'url', 'video URL', fatal=True), }] like_count = int_or_none(video.get('ratingPlus')) dislike_count = int_or_none(video.get('ratingMinus')) age_limit = 18 if video.get('adult') == 'true' else 0 return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'comment_count': comment_count, 'like_count': like_count, 'dislike_count': dislike_count, 'age_limit': age_limit, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/twentymin.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, try_get, ) class TwentyMinutenIE(InfoExtractor): IE_NAME = '20min' _VALID_URL = r'''(?x) https?:// (?:www\.)?20min\.ch/ (?: videotv/*\?.*?\bvid=| videoplayer/videoplayer\.html\?.*?\bvideoId@ ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://www.20min.ch/videotv/?vid=469148&cid=2', 'md5': 'e7264320db31eed8c38364150c12496e', 'info_dict': { 'id': '469148', 'ext': 'mp4', 'title': '85 000 Franken für 15 perfekte Minuten', 'thumbnail': r're:https?://.*\.jpg$', }, }, { 'url': 'http://www.20min.ch/videoplayer/videoplayer.html?params=client@twentyDE|videoId@523629', 'info_dict': { 'id': '523629', 'ext': 'mp4', 'title': 'So kommen Sie bei Eis und Schnee sicher an', 'description': 'md5:117c212f64b25e3d95747e5276863f7d', 'thumbnail': r're:https?://.*\.jpg$', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.20min.ch/videotv/?cid=44&vid=468738', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return [m.group('url') for m in re.finditer( r'<iframe[^>]+src=(["\'])(?P<url>(?:(?:https?:)?//)?(?:www\.)?20min\.ch/videoplayer/videoplayer.html\?.*?\bvideoId@\d+.*?)\1', webpage)] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://api.20min.ch/video/%s/show' % video_id, video_id)['content'] title = video['title'] formats = [{ 'format_id': format_id, 'url': 'http://podcast.20min-tv.ch/podcast/20min/%s%s.mp4' % (video_id, p), 'quality': quality, } for quality, (format_id, p) in enumerate([('sd', ''), ('hd', 'h')])] self._sort_formats(formats) description = video.get('lead') thumbnail = video.get('thumbnail') def extract_count(kind): return try_get( video, lambda x: int_or_none(x['communityobject']['thumbs_%s' % kind])) like_count = extract_count('up') dislike_count = extract_count('down') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 
'like_count': like_count, 'dislike_count': dislike_count, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/twentythreevideo.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import int_or_none class TwentyThreeVideoIE(InfoExtractor): IE_NAME = '23video' _VALID_URL = r'https?://video\.(?P<domain>twentythree\.net|23video\.com|filmweb\.no)/v\.ihtml/player\.html\?(?P<query>.*?\bphoto(?:_|%5f)id=(?P<id>\d+).*)' _TEST = { 'url': 'https://video.twentythree.net/v.ihtml/player.html?showDescriptions=0&source=site&photo%5fid=20448876&autoPlay=1', 'md5': '75fcf216303eb1dae9920d651f85ced4', 'info_dict': { 'id': '20448876', 'ext': 'mp4', 'title': 'Video Marketing Minute: Personalized Video', 'timestamp': 1513855354, 'upload_date': '20171221', 'uploader_id': '12258964', 'uploader': 'Rasmus Bysted', } } def _real_extract(self, url): domain, query, photo_id = re.match(self._VALID_URL, url).groups() base_url = 'https://video.%s' % domain photo_data = self._download_json( base_url + '/api/photo/list?' + query, photo_id, query={ 'format': 'json', }, transform_source=lambda s: self._search_regex(r'(?s)({.+})', s, 'photo data'))['photo'] title = photo_data['title'] formats = [] audio_path = photo_data.get('audio_download') if audio_path: formats.append({ 'format_id': 'audio', 'url': base_url + audio_path, 'filesize': int_or_none(photo_data.get('audio_size')), 'vcodec': 'none', }) def add_common_info_to_list(l, template, id_field, id_value): f_base = template % id_value f_path = photo_data.get(f_base + 'download') if not f_path: return l.append({ id_field: id_value, 'url': base_url + f_path, 'width': int_or_none(photo_data.get(f_base + 'width')), 'height': int_or_none(photo_data.get(f_base + 'height')), 'filesize': int_or_none(photo_data.get(f_base + 'size')), }) for f in ('mobile_high', 'medium', 'hd', '1080p', '4k'): add_common_info_to_list(formats, 'video_%s_', 'format_id', f) thumbnails = [] for t in ('quad16', 'quad50', 'quad75', 'quad100', 'small', 'portrait', 'standard', 'medium', 'large', 'original'): add_common_info_to_list(thumbnails, '%s_', 'id', t) 
return { 'id': photo_id, 'title': title, 'timestamp': int_or_none(photo_data.get('creation_date_epoch')), 'duration': int_or_none(photo_data.get('video_length')), 'view_count': int_or_none(photo_data.get('view_count')), 'comment_count': int_or_none(photo_data.get('number_of_comments')), 'uploader_id': photo_data.get('user_id'), 'uploader': photo_data.get('display_name'), 'thumbnails': thumbnails, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/twitcasting.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import urlencode_postdata import re class TwitCastingIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<uploader_id>[^/]+)/movie/(?P<id>\d+)' _TESTS = [{ 'url': 'https://twitcasting.tv/ivetesangalo/movie/2357609', 'md5': '745243cad58c4681dc752490f7540d7f', 'info_dict': { 'id': '2357609', 'ext': 'mp4', 'title': 'Live #2357609', 'uploader_id': 'ivetesangalo', 'description': "Moi! I'm live on TwitCasting from my iPhone.", 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://twitcasting.tv/mttbernardini/movie/3689740', 'info_dict': { 'id': '3689740', 'ext': 'mp4', 'title': 'Live playing something #3689740', 'uploader_id': 'mttbernardini', 'description': "I'm live on TwitCasting from my iPad. password: abc (Santa Marinella/Lazio, Italia)", 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, 'videopassword': 'abc', }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') uploader_id = mobj.group('uploader_id') video_password = self._downloader.params.get('videopassword') request_data = None if video_password: request_data = urlencode_postdata({ 'password': video_password, }) webpage = self._download_webpage(url, video_id, data=request_data) title = self._html_search_regex( r'(?s)<[^>]+id=["\']movietitle[^>]+>(.+?)</', webpage, 'title', default=None) or self._html_search_meta( 'twitter:title', webpage, fatal=True) m3u8_url = self._search_regex( (r'data-movie-url=(["\'])(?P<url>(?:(?!\1).)+)\1', r'(["\'])(?P<url>http.+?\.m3u8.*?)\1'), webpage, 'm3u8 url', group='url') formats = self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls') thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description( webpage, default=None) or self._html_search_meta( 
'twitter:description', webpage) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader_id': uploader_id, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/twitch.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re import random import json from .common import InfoExtractor from ..compat import ( compat_kwargs, compat_parse_qs, compat_str, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, ) from ..utils import ( clean_html, ExtractorError, float_or_none, int_or_none, orderedSet, parse_duration, parse_iso8601, qualities, try_get, unified_timestamp, update_url_query, url_or_none, urljoin, ) class TwitchBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:(?:www|go|m)\.)?twitch\.tv' _API_BASE = 'https://api.twitch.tv' _USHER_BASE = 'https://usher.ttvnw.net' _LOGIN_FORM_URL = 'https://www.twitch.tv/login' _LOGIN_POST_URL = 'https://passport.twitch.tv/login' _CLIENT_ID = 'kimne78kx3ncx6brgo4mv6wki5h1ko' _NETRC_MACHINE = 'twitch' def _handle_error(self, response): if not isinstance(response, dict): return error = response.get('error') if error: raise ExtractorError( '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')), expected=True) def _call_api(self, path, item_id, *args, **kwargs): headers = kwargs.get('headers', {}).copy() headers['Client-ID'] = self._CLIENT_ID kwargs['headers'] = headers response = self._download_json( '%s/%s' % (self._API_BASE, path), item_id, *args, **compat_kwargs(kwargs)) self._handle_error(response) return response def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return def fail(message): raise ExtractorError( 'Unable to login. 
Twitch said: %s' % message, expected=True) def login_step(page, urlh, note, data): form = self._hidden_inputs(page) form.update(data) page_url = urlh.geturl() post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page, 'post url', default=self._LOGIN_POST_URL, group='url') post_url = urljoin(page_url, post_url) headers = { 'Referer': page_url, 'Origin': page_url, 'Content-Type': 'text/plain;charset=UTF-8', } response = self._download_json( post_url, None, note, data=json.dumps(form).encode(), headers=headers, expected_status=400) error = response.get('error_description') or response.get('error_code') if error: fail(error) if 'Authenticated successfully' in response.get('message', ''): return None, None redirect_url = urljoin( post_url, response.get('redirect') or response['redirect_path']) return self._download_webpage_handle( redirect_url, None, 'Downloading login redirect page', headers=headers) login_page, handle = self._download_webpage_handle( self._LOGIN_FORM_URL, None, 'Downloading login page') # Some TOR nodes and public proxies are blocked completely if 'blacklist_message' in login_page: fail(clean_html(login_page)) redirect_page, handle = login_step( login_page, handle, 'Logging in', { 'username': username, 'password': password, 'client_id': self._CLIENT_ID, }) # Successful login if not redirect_page: return if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None: # TODO: Add mechanism to request an SMS or phone call tfa_token = self._get_tfa_info('two-factor authentication token') login_step(redirect_page, handle, 'Submitting TFA token', { 'authy_token': tfa_token, 'remember_2fa': 'true', }) def _prefer_source(self, formats): try: source = next(f for f in formats if f['format_id'] == 'Source') source['quality'] = 10 except StopIteration: for f in formats: if '/chunked/' in f['url']: f.update({ 'quality': 10, 'format_note': 'Source', }) self._sort_formats(formats) class TwitchItemBaseIE(TwitchBaseIE): def 
_download_info(self, item, item_id): return self._extract_info(self._call_api( 'kraken/videos/%s%s' % (item, item_id), item_id, 'Downloading %s info JSON' % self._ITEM_TYPE)) def _extract_media(self, item_id): info = self._download_info(self._ITEM_SHORTCUT, item_id) response = self._call_api( 'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id, 'Downloading %s playlist JSON' % self._ITEM_TYPE) entries = [] chunks = response['chunks'] qualities = list(chunks.keys()) for num, fragment in enumerate(zip(*chunks.values()), start=1): formats = [] for fmt_num, fragment_fmt in enumerate(fragment): format_id = qualities[fmt_num] fmt = { 'url': fragment_fmt['url'], 'format_id': format_id, 'quality': 1 if format_id == 'live' else 0, } m = re.search(r'^(?P<height>\d+)[Pp]', format_id) if m: fmt['height'] = int(m.group('height')) formats.append(fmt) self._sort_formats(formats) entry = dict(info) entry['id'] = '%s_%d' % (entry['id'], num) entry['title'] = '%s part %d' % (entry['title'], num) entry['formats'] = formats entries.append(entry) return self.playlist_result(entries, info['id'], info['title']) def _extract_info(self, info): status = info.get('status') if status == 'recording': is_live = True elif status == 'recorded': is_live = False else: is_live = None return { 'id': info['_id'], 'title': info.get('title') or 'Untitled Broadcast', 'description': info.get('description'), 'duration': int_or_none(info.get('length')), 'thumbnail': info.get('preview'), 'uploader': info.get('channel', {}).get('display_name'), 'uploader_id': info.get('channel', {}).get('name'), 'timestamp': parse_iso8601(info.get('recorded_at')), 'view_count': int_or_none(info.get('views')), 'is_live': is_live, } def _real_extract(self, url): return self._extract_media(self._match_id(url)) class TwitchVideoIE(TwitchItemBaseIE): IE_NAME = 'twitch:video' _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE _ITEM_TYPE = 'video' _ITEM_SHORTCUT = 'a' _TEST = { 'url': 
'http://www.twitch.tv/riotgames/b/577357806', 'info_dict': { 'id': 'a577357806', 'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG', }, 'playlist_mincount': 12, 'skip': 'HTTP Error 404: Not Found', } class TwitchChapterIE(TwitchItemBaseIE): IE_NAME = 'twitch:chapter' _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE _ITEM_TYPE = 'chapter' _ITEM_SHORTCUT = 'c' _TESTS = [{ 'url': 'http://www.twitch.tv/acracingleague/c/5285812', 'info_dict': { 'id': 'c5285812', 'title': 'ACRL Off Season - Sports Cars @ Nordschleife', }, 'playlist_mincount': 3, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361', 'only_matching': True, }] class TwitchVodIE(TwitchItemBaseIE): IE_NAME = 'twitch:vod' _VALID_URL = r'''(?x) https?:// (?: (?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/| player\.twitch\.tv/\?.*?\bvideo=v? ) (?P<id>\d+) ''' _ITEM_TYPE = 'vod' _ITEM_SHORTCUT = 'v' _TESTS = [{ 'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s', 'info_dict': { 'id': 'v6528877', 'ext': 'mp4', 'title': 'LCK Summer Split - Week 6 Day 1', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 17208, 'timestamp': 1435131709, 'upload_date': '20150624', 'uploader': 'Riot Games', 'uploader_id': 'riotgames', 'view_count': int, 'start_time': 310, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Untitled broadcast (title is None) 'url': 'http://www.twitch.tv/belkao_o/v/11230755', 'info_dict': { 'id': 'v11230755', 'ext': 'mp4', 'title': 'Untitled Broadcast', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1638, 'timestamp': 1439746708, 'upload_date': '20150816', 'uploader': 'BelkAO_o', 'uploader_id': 'belkao_o', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://player.twitch.tv/?t=5m10s&video=v6528877', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/videos/6528877', 'only_matching': True, }, { 'url': 
'https://m.twitch.tv/beagsandjam/v/247478721', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/northernlion/video/291940395', 'only_matching': True, }, { 'url': 'https://player.twitch.tv/?video=480452374', 'only_matching': True, }] def _real_extract(self, url): item_id = self._match_id(url) info = self._download_info(self._ITEM_SHORTCUT, item_id) access_token = self._call_api( 'api/vods/%s/access_token' % item_id, item_id, 'Downloading %s access token' % self._ITEM_TYPE) formats = self._extract_m3u8_formats( '%s/vod/%s.m3u8?%s' % ( self._USHER_BASE, item_id, compat_urllib_parse_urlencode({ 'allow_source': 'true', 'allow_audio_only': 'true', 'allow_spectre': 'true', 'player': 'twitchweb', 'nauth': access_token['token'], 'nauthsig': access_token['sig'], })), item_id, 'mp4', entry_protocol='m3u8_native') self._prefer_source(formats) info['formats'] = formats parsed_url = compat_urllib_parse_urlparse(url) query = compat_parse_qs(parsed_url.query) if 't' in query: info['start_time'] = parse_duration(query['t'][0]) if info.get('timestamp') is not None: info['subtitles'] = { 'rechat': [{ 'url': update_url_query( 'https://rechat.twitch.tv/rechat-messages', { 'video_id': 'v%s' % item_id, 'start': info['timestamp'], }), 'ext': 'json', }], } return info class TwitchPlaylistBaseIE(TwitchBaseIE): _PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d' _PAGE_LIMIT = 100 def _extract_playlist(self, channel_id): info = self._call_api( 'kraken/channels/%s' % channel_id, channel_id, 'Downloading channel info JSON') channel_name = info.get('display_name') or info.get('name') entries = [] offset = 0 limit = self._PAGE_LIMIT broken_paging_detected = False counter_override = None for counter in itertools.count(1): response = self._call_api( self._PLAYLIST_PATH % (channel_id, offset, limit), channel_id, 'Downloading %s JSON page %s' % (self._PLAYLIST_TYPE, counter_override or counter)) page_entries = self._extract_playlist_page(response) if not page_entries: break 
total = int_or_none(response.get('_total')) # Since the beginning of March 2016 twitch's paging mechanism # is completely broken on the twitch side. It simply ignores # a limit and returns the whole offset number of videos. # Working around by just requesting all videos at once. # Upd: pagination bug was fixed by twitch on 15.03.2016. if not broken_paging_detected and total and len(page_entries) > limit: self.report_warning( 'Twitch pagination is broken on twitch side, requesting all videos at once', channel_id) broken_paging_detected = True offset = total counter_override = '(all at once)' continue entries.extend(page_entries) if broken_paging_detected or total and len(page_entries) >= total: break offset += limit return self.playlist_result( [self._make_url_result(entry) for entry in orderedSet(entries)], channel_id, channel_name) def _make_url_result(self, url): try: video_id = 'v%s' % TwitchVodIE._match_id(url) return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id) except AssertionError: return self.url_result(url) def _extract_playlist_page(self, response): videos = response.get('videos') return [video['url'] for video in videos] if videos else [] def _real_extract(self, url): return self._extract_playlist(self._match_id(url)) class TwitchProfileIE(TwitchPlaylistBaseIE): IE_NAME = 'twitch:profile' _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE _PLAYLIST_TYPE = 'profile' _TESTS = [{ 'url': 'http://www.twitch.tv/vanillatv/profile', 'info_dict': { 'id': 'vanillatv', 'title': 'VanillaTV', }, 'playlist_mincount': 412, }, { 'url': 'http://m.twitch.tv/vanillatv/profile', 'only_matching': True, }] class TwitchVideosBaseIE(TwitchPlaylistBaseIE): _VALID_URL_VIDEOS_BASE = r'%s/(?P<id>[^/]+)/videos' % TwitchBaseIE._VALID_URL_BASE _PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcast_type=' class TwitchAllVideosIE(TwitchVideosBaseIE): IE_NAME = 'twitch:videos:all' _VALID_URL = r'%s/all' % 
TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive,upload,highlight' _PLAYLIST_TYPE = 'all videos' _TESTS = [{ 'url': 'https://www.twitch.tv/spamfish/videos/all', 'info_dict': { 'id': 'spamfish', 'title': 'Spamfish', }, 'playlist_mincount': 869, }, { 'url': 'https://m.twitch.tv/spamfish/videos/all', 'only_matching': True, }] class TwitchUploadsIE(TwitchVideosBaseIE): IE_NAME = 'twitch:videos:uploads' _VALID_URL = r'%s/uploads' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'upload' _PLAYLIST_TYPE = 'uploads' _TESTS = [{ 'url': 'https://www.twitch.tv/spamfish/videos/uploads', 'info_dict': { 'id': 'spamfish', 'title': 'Spamfish', }, 'playlist_mincount': 0, }, { 'url': 'https://m.twitch.tv/spamfish/videos/uploads', 'only_matching': True, }] class TwitchPastBroadcastsIE(TwitchVideosBaseIE): IE_NAME = 'twitch:videos:past-broadcasts' _VALID_URL = r'%s/past-broadcasts' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive' _PLAYLIST_TYPE = 'past broadcasts' _TESTS = [{ 'url': 'https://www.twitch.tv/spamfish/videos/past-broadcasts', 'info_dict': { 'id': 'spamfish', 'title': 'Spamfish', }, 'playlist_mincount': 0, }, { 'url': 'https://m.twitch.tv/spamfish/videos/past-broadcasts', 'only_matching': True, }] class TwitchHighlightsIE(TwitchVideosBaseIE): IE_NAME = 'twitch:videos:highlights' _VALID_URL = r'%s/highlights' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'highlight' _PLAYLIST_TYPE = 'highlights' _TESTS = [{ 'url': 'https://www.twitch.tv/spamfish/videos/highlights', 'info_dict': { 'id': 'spamfish', 'title': 'Spamfish', }, 'playlist_mincount': 805, }, { 'url': 'https://m.twitch.tv/spamfish/videos/highlights', 'only_matching': True, }] class TwitchStreamIE(TwitchBaseIE): IE_NAME = 'twitch:stream' _VALID_URL = r'''(?x) https?:// (?: (?:(?:www|go|m)\.)?twitch\.tv/| 
player\.twitch\.tv/\?.*?\bchannel= ) (?P<id>[^/#?]+) ''' _TESTS = [{ 'url': 'http://www.twitch.tv/shroomztv', 'info_dict': { 'id': '12772022048', 'display_id': 'shroomztv', 'ext': 'mp4', 'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV', 'is_live': True, 'timestamp': 1421928037, 'upload_date': '20150122', 'uploader': 'ShroomzTV', 'uploader_id': 'shroomztv', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.twitch.tv/miracle_doto#profile-0', 'only_matching': True, }, { 'url': 'https://player.twitch.tv/?channel=lotsofs', 'only_matching': True, }, { 'url': 'https://go.twitch.tv/food', 'only_matching': True, }, { 'url': 'https://m.twitch.tv/food', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if any(ie.suitable(url) for ie in ( TwitchVideoIE, TwitchChapterIE, TwitchVodIE, TwitchProfileIE, TwitchAllVideosIE, TwitchUploadsIE, TwitchPastBroadcastsIE, TwitchHighlightsIE, TwitchClipsIE)) else super(TwitchStreamIE, cls).suitable(url)) def _real_extract(self, url): channel_id = self._match_id(url) stream = self._call_api( 'kraken/streams/%s?stream_type=all' % channel_id, channel_id, 'Downloading stream JSON').get('stream') if not stream: raise ExtractorError('%s is offline' % channel_id, expected=True) # Channel name may be typed if different case than the original channel name # (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing # an invalid m3u8 URL. Working around by use of original channel name from stream # JSON and fallback to lowercase if it's not available. 
channel_id = stream.get('channel', {}).get('name') or channel_id.lower() access_token = self._call_api( 'api/channels/%s/access_token' % channel_id, channel_id, 'Downloading channel access token') query = { 'allow_source': 'true', 'allow_audio_only': 'true', 'allow_spectre': 'true', 'p': random.randint(1000000, 10000000), 'player': 'twitchweb', 'segment_preference': '4', 'sig': access_token['sig'].encode('utf-8'), 'token': access_token['token'].encode('utf-8'), } formats = self._extract_m3u8_formats( '%s/api/channel/hls/%s.m3u8?%s' % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)), channel_id, 'mp4') self._prefer_source(formats) view_count = stream.get('viewers') timestamp = parse_iso8601(stream.get('created_at')) channel = stream['channel'] title = self._live_title(channel.get('display_name') or channel.get('name')) description = channel.get('status') thumbnails = [] for thumbnail_key, thumbnail_url in stream['preview'].items(): m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key) if not m: continue thumbnails.append({ 'url': thumbnail_url, 'width': int(m.group('width')), 'height': int(m.group('height')), }) return { 'id': compat_str(stream['_id']), 'display_id': channel_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'uploader': channel.get('display_name'), 'uploader_id': channel.get('name'), 'timestamp': timestamp, 'view_count': view_count, 'formats': formats, 'is_live': True, } class TwitchClipsIE(TwitchBaseIE): IE_NAME = 'twitch:clips' _VALID_URL = r'https?://(?:clips\.twitch\.tv/(?:[^/]+/)*|(?:www\.)?twitch\.tv/[^/]+/clip/)(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat', 'md5': '761769e1eafce0ffebfb4089cb3847cd', 'info_dict': { 'id': '42850523', 'ext': 'mp4', 'title': 'EA Play 2016 Live from the Novo Theatre', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1465767393, 'upload_date': '20160612', 'creator': 'EA', 'uploader': 'stereotype_', 'uploader_id': 
'43566419', }, }, { # multiple formats 'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/sergeynixon/clip/StormyThankfulSproutFutureMan', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) status = self._download_json( 'https://clips.twitch.tv/api/v2/clips/%s/status' % video_id, video_id) formats = [] for option in status['quality_options']: if not isinstance(option, dict): continue source = url_or_none(option.get('source')) if not source: continue formats.append({ 'url': source, 'format_id': option.get('quality'), 'height': int_or_none(option.get('quality')), 'fps': int_or_none(option.get('frame_rate')), }) self._sort_formats(formats) info = { 'formats': formats, } clip = self._call_api( 'kraken/clips/%s' % video_id, video_id, fatal=False, headers={ 'Accept': 'application/vnd.twitchtv.v5+json', }) if clip: quality_key = qualities(('tiny', 'small', 'medium')) thumbnails = [] thumbnails_dict = clip.get('thumbnails') if isinstance(thumbnails_dict, dict): for thumbnail_id, thumbnail_url in thumbnails_dict.items(): thumbnails.append({ 'id': thumbnail_id, 'url': thumbnail_url, 'preference': quality_key(thumbnail_id), }) info.update({ 'id': clip.get('tracking_id') or video_id, 'title': clip.get('title') or video_id, 'duration': float_or_none(clip.get('duration')), 'views': int_or_none(clip.get('views')), 'timestamp': unified_timestamp(clip.get('created_at')), 'thumbnails': thumbnails, 'creator': try_get(clip, lambda x: x['broadcaster']['display_name'], compat_str), 'uploader': try_get(clip, lambda x: x['curator']['display_name'], compat_str), 'uploader_id': try_get(clip, lambda x: x['curator']['id'], compat_str), }) else: info.update({ 'title': video_id, 'id': video_id, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/twitter.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( determine_ext, dict_get, ExtractorError, float_or_none, int_or_none, remove_end, try_get, xpath_text, ) from .periscope import PeriscopeIE class TwitterBaseIE(InfoExtractor): def _extract_formats_from_vmap_url(self, vmap_url, video_id): vmap_data = self._download_xml(vmap_url, video_id) video_url = xpath_text(vmap_data, './/MediaFile').strip() if determine_ext(video_url) == 'm3u8': return self._extract_m3u8_formats( video_url, video_id, ext='mp4', m3u8_id='hls', entry_protocol='m3u8_native') return [{ 'url': video_url, }] @staticmethod def _search_dimensions_in_video_url(a_format, video_url): m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url) if m: a_format.update({ 'width': int(m.group('width')), 'height': int(m.group('height')), }) class TwitterCardIE(TwitterBaseIE): IE_NAME = 'twitter:card' _VALID_URL = r'https?://(?:www\.)?twitter\.com/i/(?P<path>cards/tfw/v1|videos(?:/tweet)?)/(?P<id>\d+)' _TESTS = [ { 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889', # MD5 checksums are different in different places 'info_dict': { 'id': '560070183650213889', 'ext': 'mp4', 'title': 'Twitter web player', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 30.033, }, }, { 'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768', 'md5': '7ee2a553b63d1bccba97fbed97d9e1c8', 'info_dict': { 'id': '623160978427936768', 'ext': 'mp4', 'title': 'Twitter web player', 'thumbnail': r're:^https?://.*$', }, }, { 'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977', 'md5': 'b6d9683dd3f48e340ded81c0e917ad46', 'info_dict': { 'id': 'dq4Oj5quskI', 'ext': 'mp4', 'title': 'Ubuntu 11.10 Overview', 'description': 'md5:a831e97fa384863d6e26ce48d1c43376', 'upload_date': '20111013', 'uploader': 'OMG! 
Ubuntu!', 'uploader_id': 'omgubuntu', }, 'add_ie': ['Youtube'], }, { 'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568', 'md5': '6dabeaca9e68cbb71c99c322a4b42a11', 'info_dict': { 'id': 'iBb2x00UVlv', 'ext': 'mp4', 'upload_date': '20151113', 'uploader_id': '1189339351084113920', 'uploader': 'ArsenalTerje', 'title': 'Vine by ArsenalTerje', 'timestamp': 1447451307, }, 'add_ie': ['Vine'], }, { 'url': 'https://twitter.com/i/videos/tweet/705235433198714880', 'md5': '884812a2adc8aaf6fe52b15ccbfa3b88', 'info_dict': { 'id': '705235433198714880', 'ext': 'mp4', 'title': 'Twitter web player', 'thumbnail': r're:^https?://.*', }, }, { 'url': 'https://twitter.com/i/videos/752274308186120192', 'only_matching': True, }, ] _API_BASE = 'https://api.twitter.com/1.1' def _parse_media_info(self, media_info, video_id): formats = [] for media_variant in media_info.get('variants', []): media_url = media_variant['url'] if media_url.endswith('.m3u8'): formats.extend(self._extract_m3u8_formats(media_url, video_id, ext='mp4', m3u8_id='hls')) elif media_url.endswith('.mpd'): formats.extend(self._extract_mpd_formats(media_url, video_id, mpd_id='dash')) else: tbr = int_or_none(dict_get(media_variant, ('bitRate', 'bitrate')), scale=1000) a_format = { 'url': media_url, 'format_id': 'http-%d' % tbr if tbr else 'http', 'tbr': tbr, } # Reported bitRate may be zero if not a_format['tbr']: del a_format['tbr'] self._search_dimensions_in_video_url(a_format, media_url) formats.append(a_format) return formats def _extract_mobile_formats(self, username, video_id): webpage = self._download_webpage( 'https://mobile.twitter.com/%s/status/%s' % (username, video_id), video_id, 'Downloading mobile webpage', headers={ # A recent mobile UA is necessary for `gt` cookie 'User-Agent': 'Mozilla/5.0 (Android 6.0.1; Mobile; rv:54.0) Gecko/54.0 Firefox/54.0', }) main_script_url = self._html_search_regex( r'<script[^>]+src="([^"]+main\.[^"]+)"', webpage, 'main script URL') main_script = self._download_webpage( 
main_script_url, video_id, 'Downloading main script') bearer_token = self._search_regex( r'BEARER_TOKEN\s*:\s*"([^"]+)"', main_script, 'bearer token') # https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-show-id api_data = self._download_json( '%s/statuses/show/%s.json' % (self._API_BASE, video_id), video_id, 'Downloading API data', headers={ 'Authorization': 'Bearer ' + bearer_token, }) media_info = try_get(api_data, lambda o: o['extended_entities']['media'][0]['video_info']) or {} return self._parse_media_info(media_info, video_id) def _real_extract(self, url): path, video_id = re.search(self._VALID_URL, url).groups() config = None formats = [] duration = None urls = [url] if path.startswith('cards/'): urls.append('https://twitter.com/i/videos/' + video_id) for u in urls: webpage = self._download_webpage( u, video_id, headers={'Referer': 'https://twitter.com/'}) iframe_url = self._html_search_regex( r'<iframe[^>]+src="((?:https?:)?//(?:www\.youtube\.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"', webpage, 'video iframe', default=None) if iframe_url: return self.url_result(iframe_url) config = self._parse_json(self._html_search_regex( r'data-(?:player-)?config="([^"]+)"', webpage, 'data player config', default='{}'), video_id) if config.get('source_type') == 'vine': return self.url_result(config['player_url'], 'Vine') periscope_url = PeriscopeIE._extract_url(webpage) if periscope_url: return self.url_result(periscope_url, PeriscopeIE.ie_key()) video_url = config.get('video_url') or config.get('playlist', [{}])[0].get('source') if video_url: if determine_ext(video_url) == 'm3u8': formats.extend(self._extract_m3u8_formats(video_url, video_id, ext='mp4', m3u8_id='hls')) else: f = { 'url': video_url, } self._search_dimensions_in_video_url(f, video_url) formats.append(f) vmap_url = config.get('vmapUrl') or config.get('vmap_url') if vmap_url: formats.extend( self._extract_formats_from_vmap_url(vmap_url, video_id)) media_info = 
None for entity in config.get('status', {}).get('entities', []): if 'mediaInfo' in entity: media_info = entity['mediaInfo'] if media_info: formats.extend(self._parse_media_info(media_info, video_id)) duration = float_or_none(media_info.get('duration', {}).get('nanos'), scale=1e9) username = config.get('user', {}).get('screen_name') if username: formats.extend(self._extract_mobile_formats(username, video_id)) if formats: title = self._search_regex(r'<title>([^<]+)</title>', webpage, 'title') thumbnail = config.get('posterImageUrl') or config.get('image_src') duration = float_or_none(config.get('duration'), scale=1000) or duration break if not formats: headers = { 'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw', 'Referer': url, } ct0 = self._get_cookies(url).get('ct0') if ct0: headers['csrf_token'] = ct0.value guest_token = self._download_json( '%s/guest/activate.json' % self._API_BASE, video_id, 'Downloading guest token', data=b'', headers=headers)['guest_token'] headers['x-guest-token'] = guest_token self._set_cookie('api.twitter.com', 'gt', guest_token) config = self._download_json( '%s/videos/tweet/config/%s.json' % (self._API_BASE, video_id), video_id, headers=headers) track = config['track'] vmap_url = track.get('vmapUrl') if vmap_url: formats = self._extract_formats_from_vmap_url(vmap_url, video_id) else: playback_url = track['playbackUrl'] if determine_ext(playback_url) == 'm3u8': formats = self._extract_m3u8_formats( playback_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') else: formats = [{ 'url': playback_url, }] title = 'Twitter web player' thumbnail = config.get('posterImage') duration = float_or_none(track.get('durationMs'), scale=1000) self._remove_duplicate_formats(formats) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, } class TwitterIE(InfoExtractor): 
IE_NAME = 'twitter' _VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?:i/web|(?P<user_id>[^/]+))/status/(?P<id>\d+)' _TEMPLATE_URL = 'https://twitter.com/%s/status/%s' _TEMPLATE_STATUSES_URL = 'https://twitter.com/statuses/%s' _TESTS = [{ 'url': 'https://twitter.com/freethenipple/status/643211948184596480', 'info_dict': { 'id': '643211948184596480', 'ext': 'mp4', 'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"', 'uploader': 'FREE THE NIPPLE', 'uploader_id': 'freethenipple', 'duration': 12.922, }, }, { 'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1', 'md5': 'f36dcd5fb92bf7057f155e7d927eeb42', 'info_dict': { 'id': '657991469417025536', 'ext': 'mp4', 'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai', 'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"', 'thumbnail': r're:^https?://.*\.png', 'uploader': 'Gifs', 'uploader_id': 'giphz', }, 'expected_warnings': ['height', 'width'], 'skip': 'Account suspended', }, { 'url': 'https://twitter.com/starwars/status/665052190608723968', 'info_dict': { 'id': '665052190608723968', 'ext': 'mp4', 'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.', 'description': 'Star Wars on Twitter: "A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens."', 'uploader_id': 'starwars', 'uploader': 'Star Wars', }, }, { 'url': 'https://twitter.com/BTNBrentYarina/status/705235433198714880', 'info_dict': { 'id': '705235433198714880', 'ext': 'mp4', 'title': 'Brent Yarina - Khalil Iverson\'s missed highlight dunk. And made highlight dunk. In one highlight.', 'description': 'Brent Yarina on Twitter: "Khalil Iverson\'s missed highlight dunk. 
And made highlight dunk. In one highlight."', 'uploader_id': 'BTNBrentYarina', 'uploader': 'Brent Yarina', }, 'params': { # The same video as https://twitter.com/i/videos/tweet/705235433198714880 # Test case of TwitterCardIE 'skip_download': True, }, }, { 'url': 'https://twitter.com/jaydingeer/status/700207533655363584', 'info_dict': { 'id': '700207533655363584', 'ext': 'mp4', 'title': 'JG - BEAT PROD: @suhmeduh #Damndaniel', 'description': 'JG on Twitter: "BEAT PROD: @suhmeduh https://t.co/HBrQ4AfpvZ #Damndaniel https://t.co/byBooq2ejZ"', 'thumbnail': r're:^https?://.*\.jpg', 'uploader': 'JG', 'uploader_id': 'jaydingeer', 'duration': 30.0, }, }, { 'url': 'https://twitter.com/Filmdrunk/status/713801302971588609', 'md5': '89a15ed345d13b86e9a5a5e051fa308a', 'info_dict': { 'id': 'MIOxnrUteUd', 'ext': 'mp4', 'title': 'Vince Mancini - Vine of the day', 'description': 'Vince Mancini on Twitter: "Vine of the day https://t.co/xmTvRdqxWf"', 'uploader': 'Vince Mancini', 'uploader_id': 'Filmdrunk', 'timestamp': 1402826626, 'upload_date': '20140615', }, 'add_ie': ['Vine'], }, { 'url': 'https://twitter.com/captainamerica/status/719944021058060289', 'info_dict': { 'id': '719944021058060289', 'ext': 'mp4', 'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theaters.', 'description': 'Captain America on Twitter: "@King0fNerd Are you sure you made the right choice? Find out in theaters. 
https://t.co/GpgYi9xMJI"', 'uploader_id': 'captainamerica', 'uploader': 'Captain America', 'duration': 3.17, }, }, { 'url': 'https://twitter.com/OPP_HSD/status/779210622571536384', 'info_dict': { 'id': '1zqKVVlkqLaKB', 'ext': 'mp4', 'title': 'Sgt Kerry Schmidt - LIVE on #Periscope: Road rage, mischief, assault, rollover and fire in one occurrence', 'description': 'Sgt Kerry Schmidt on Twitter: "LIVE on #Periscope: Road rage, mischief, assault, rollover and fire in one occurrence https://t.co/EKrVgIXF3s"', 'upload_date': '20160923', 'uploader_id': 'OPP_HSD', 'uploader': 'Sgt Kerry Schmidt', 'timestamp': 1474613214, }, 'add_ie': ['Periscope'], }, { # has mp4 formats via mobile API 'url': 'https://twitter.com/news_al3alm/status/852138619213144067', 'info_dict': { 'id': '852138619213144067', 'ext': 'mp4', 'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة', 'description': 'عالم الأخبار on Twitter: "كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة https://t.co/xg6OhpyKfN"', 'uploader': 'عالم الأخبار', 'uploader_id': 'news_al3alm', 'duration': 277.4, }, }, { 'url': 'https://twitter.com/i/web/status/910031516746514432', 'info_dict': { 'id': '910031516746514432', 'ext': 'mp4', 'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre.', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'Préfet de Guadeloupe on Twitter: "[Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre. 
https://t.co/mwx01Rs4lo"', 'uploader': 'Préfet de Guadeloupe', 'uploader_id': 'Prefet971', 'duration': 47.48, }, 'params': { 'skip_download': True, # requires ffmpeg }, }, { # card via api.twitter.com/1.1/videos/tweet/config 'url': 'https://twitter.com/LisPower1/status/1001551623938805763', 'info_dict': { 'id': '1001551623938805763', 'ext': 'mp4', 'title': 're:.*?Shep is on a roll today.*?', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:63b036c228772523ae1924d5f8e5ed6b', 'uploader': 'Lis Power', 'uploader_id': 'LisPower1', 'duration': 111.278, }, 'params': { 'skip_download': True, # requires ffmpeg }, }, { 'url': 'https://twitter.com/foobar/status/1087791357756956680', 'info_dict': { 'id': '1087791357756956680', 'ext': 'mp4', 'title': 'Twitter - A new is coming. Some of you got an opt-in to try it now. Check out the emoji button, quick keyboard shortcuts, upgraded trends, advanced search, and more. Let us know your thoughts!', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:66d493500c013e3e2d434195746a7f78', 'uploader': 'Twitter', 'uploader_id': 'Twitter', 'duration': 61.567, }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) twid = mobj.group('id') webpage, urlh = self._download_webpage_handle( self._TEMPLATE_STATUSES_URL % twid, twid) if 'twitter.com/account/suspended' in urlh.geturl(): raise ExtractorError('Account suspended by Twitter.', expected=True) user_id = None redirect_mobj = re.match(self._VALID_URL, urlh.geturl()) if redirect_mobj: user_id = redirect_mobj.group('user_id') if not user_id: user_id = mobj.group('user_id') username = remove_end(self._og_search_title(webpage), ' on Twitter') title = description = self._og_search_description(webpage).strip('').replace('\n', ' ').strip('“”') # strip 'https -_t.co_BJYgOjSeGA' junk from filenames title = re.sub(r'\s+(https?://[^ ]+)', '', title) info = { 'uploader_id': user_id, 'uploader': username, 'webpage_url': url, 'description': '%s on Twitter: "%s"' % 
(username, description), 'title': username + ' - ' + title, } mobj = re.search(r'''(?x) <video[^>]+class="animated-gif"(?P<more_info>[^>]+)>\s* <source[^>]+video-src="(?P<url>[^"]+)" ''', webpage) if mobj: more_info = mobj.group('more_info') height = int_or_none(self._search_regex( r'data-height="(\d+)"', more_info, 'height', fatal=False)) width = int_or_none(self._search_regex( r'data-width="(\d+)"', more_info, 'width', fatal=False)) thumbnail = self._search_regex( r'poster="([^"]+)"', more_info, 'poster', fatal=False) info.update({ 'id': twid, 'url': mobj.group('url'), 'height': height, 'width': width, 'thumbnail': thumbnail, }) return info twitter_card_url = None if 'class="PlayableMedia' in webpage: twitter_card_url = '%s//twitter.com/i/videos/tweet/%s' % (self.http_scheme(), twid) else: twitter_card_iframe_url = self._search_regex( r'data-full-card-iframe-url=([\'"])(?P<url>(?:(?!\1).)+)\1', webpage, 'Twitter card iframe URL', default=None, group='url') if twitter_card_iframe_url: twitter_card_url = compat_urlparse.urljoin(url, twitter_card_iframe_url) if twitter_card_url: info.update({ '_type': 'url_transparent', 'ie_key': 'TwitterCard', 'url': twitter_card_url, }) return info raise ExtractorError('There\'s no video in this tweet.') class TwitterAmplifyIE(TwitterBaseIE): IE_NAME = 'twitter:amplify' _VALID_URL = r'https?://amp\.twimg\.com/v/(?P<id>[0-9a-f\-]{36})' _TEST = { 'url': 'https://amp.twimg.com/v/0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951', 'md5': '7df102d0b9fd7066b86f3159f8e81bf6', 'info_dict': { 'id': '0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951', 'ext': 'mp4', 'title': 'Twitter Video', 'thumbnail': 're:^https?://.*', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) vmap_url = self._html_search_meta( 'twitter:amplify:vmap', webpage, 'vmap url') formats = self._extract_formats_from_vmap_url(vmap_url, video_id) thumbnails = [] thumbnail = self._html_search_meta( 'twitter:image:src', webpage, 
'thumbnail', fatal=False) def _find_dimension(target): w = int_or_none(self._html_search_meta( 'twitter:%s:width' % target, webpage, fatal=False)) h = int_or_none(self._html_search_meta( 'twitter:%s:height' % target, webpage, fatal=False)) return w, h if thumbnail: thumbnail_w, thumbnail_h = _find_dimension('image') thumbnails.append({ 'url': thumbnail, 'width': thumbnail_w, 'height': thumbnail_h, }) video_w, video_h = _find_dimension('player') formats[0].update({ 'width': video_w, 'height': video_h, }) return { 'id': video_id, 'title': 'Twitter Video', 'formats': formats, 'thumbnails': thumbnails, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/udemy.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_kwargs, compat_str, compat_urllib_request, compat_urlparse, ) from ..utils import ( determine_ext, extract_attributes, ExtractorError, float_or_none, int_or_none, js_to_json, sanitized_Request, try_get, unescapeHTML, url_or_none, urlencode_postdata, ) class UdemyIE(InfoExtractor): IE_NAME = 'udemy' _VALID_URL = r'''(?x) https?:// (?:[^/]+\.)?udemy\.com/ (?: [^#]+\#/lecture/| lecture/view/?\?lectureId=| [^/]+/learn/v4/t/lecture/ ) (?P<id>\d+) ''' _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1' _ORIGIN_URL = 'https://www.udemy.com' _NETRC_MACHINE = 'udemy' _TESTS = [{ 'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757', 'md5': '98eda5b657e752cf945d8445e261b5c5', 'info_dict': { 'id': '160614', 'ext': 'mp4', 'title': 'Introduction and Installation', 'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876', 'duration': 579.29, }, 'skip': 'Requires udemy account credentials', }, { # new URL schema 'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906', 'only_matching': True, }, { # no url in outputs format entry 'url': 'https://www.udemy.com/learn-web-development-complete-step-by-step-guide-to-success/learn/v4/t/lecture/4125812', 'only_matching': True, }, { # only outputs rendition 'url': 'https://www.udemy.com/how-you-can-help-your-local-community-5-amazing-examples/learn/v4/t/lecture/3225750?start=0', 'only_matching': True, }, { 'url': 'https://wipro.udemy.com/java-tutorial/#/lecture/172757', 'only_matching': True, }] def _extract_course_info(self, webpage, video_id): course = self._parse_json( unescapeHTML(self._search_regex( r'ng-init=["\'].*\bcourse=({.+?})[;"\']', webpage, 'course', default='{}')), video_id, fatal=False) or {} course_id = course.get('id') or self._search_regex( [ r'data-course-id=["\'](\d+)', r'&quot;courseId&quot;\s*:\s*(\d+)' ], 
webpage, 'course id') return course_id, course.get('title') def _enroll_course(self, base_url, webpage, course_id): def combine_url(base_url, url): return compat_urlparse.urljoin(base_url, url) if not url.startswith('http') else url checkout_url = unescapeHTML(self._search_regex( r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/(?:payment|cart)/checkout/.+?)\1', webpage, 'checkout url', group='url', default=None)) if checkout_url: raise ExtractorError( 'Course %s is not free. You have to pay for it before you can download. ' 'Use this URL to confirm purchase: %s' % (course_id, combine_url(base_url, checkout_url)), expected=True) enroll_url = unescapeHTML(self._search_regex( r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1', webpage, 'enroll url', group='url', default=None)) if enroll_url: webpage = self._download_webpage( combine_url(base_url, enroll_url), course_id, 'Enrolling in the course', headers={'Referer': base_url}) if '>You have enrolled in' in webpage: self.to_screen('%s: Successfully enrolled in the course' % course_id) def _download_lecture(self, course_id, lecture_id): return self._download_json( 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?' 
% (course_id, lecture_id), lecture_id, 'Downloading lecture JSON', query={ 'fields[lecture]': 'title,description,view_html,asset', 'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,captions,data', }) def _handle_error(self, response): if not isinstance(response, dict): return error = response.get('error') if error: error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message')) error_data = error.get('data') if error_data: error_str += ' - %s' % error_data.get('formErrors') raise ExtractorError(error_str, expected=True) def _download_webpage_handle(self, *args, **kwargs): headers = kwargs.get('headers', {}).copy() headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36' kwargs['headers'] = headers ret = super(UdemyIE, self)._download_webpage_handle( *args, **compat_kwargs(kwargs)) if not ret: return ret webpage, _ = ret if any(p in webpage for p in ( '>Please verify you are a human', 'Access to this page has been denied because we believe you are using automation tools to browse the website', '"_pxCaptcha"')): raise ExtractorError( 'Udemy asks you to solve a CAPTCHA. 
Login with browser, ' 'solve CAPTCHA, then export cookies and pass cookie file to ' 'youtube-dl with --cookies.', expected=True) return ret def _download_json(self, url_or_request, *args, **kwargs): headers = { 'X-Udemy-Snail-Case': 'true', 'X-Requested-With': 'XMLHttpRequest', } for cookie in self._downloader.cookiejar: if cookie.name == 'client_id': headers['X-Udemy-Client-Id'] = cookie.value elif cookie.name == 'access_token': headers['X-Udemy-Bearer-Token'] = cookie.value headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value if isinstance(url_or_request, compat_urllib_request.Request): for header, value in headers.items(): url_or_request.add_header(header, value) else: url_or_request = sanitized_Request(url_or_request, headers=headers) response = super(UdemyIE, self)._download_json(url_or_request, *args, **kwargs) self._handle_error(response) return response def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return login_popup = self._download_webpage( self._LOGIN_URL, None, 'Downloading login popup') def is_logged(webpage): return any(re.search(p, webpage) for p in ( r'href=["\'](?:https://www\.udemy\.com)?/user/logout/', r'>Logout<')) # already logged in if is_logged(login_popup): return login_form = self._form_hidden_inputs('login-form', login_popup) login_form.update({ 'email': username, 'password': password, }) response = self._download_webpage( self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form), headers={ 'Referer': self._ORIGIN_URL, 'Origin': self._ORIGIN_URL, }) if not is_logged(response): error = self._html_search_regex( r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>', response, 'error message', default=None) if error: raise ExtractorError('Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') def _real_extract(self, url): lecture_id = self._match_id(url) webpage = self._download_webpage(url, lecture_id) 
course_id, _ = self._extract_course_info(webpage, lecture_id) try: lecture = self._download_lecture(course_id, lecture_id) except ExtractorError as e: # Error could possibly mean we are not enrolled in the course if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: self._enroll_course(url, webpage, course_id) lecture = self._download_lecture(course_id, lecture_id) else: raise title = lecture['title'] description = lecture.get('description') asset = lecture['asset'] asset_type = asset.get('asset_type') or asset.get('assetType') if asset_type != 'Video': raise ExtractorError( 'Lecture %s is not a video' % lecture_id, expected=True) stream_url = asset.get('stream_url') or asset.get('streamUrl') if stream_url: youtube_url = self._search_regex( r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None) if youtube_url: return self.url_result(youtube_url, 'Youtube') video_id = compat_str(asset['id']) thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl') duration = float_or_none(asset.get('data', {}).get('duration')) subtitles = {} automatic_captions = {} formats = [] def extract_output_format(src, f_id): return { 'url': src.get('url'), 'format_id': '%sp' % (src.get('height') or f_id), 'width': int_or_none(src.get('width')), 'height': int_or_none(src.get('height')), 'vbr': int_or_none(src.get('video_bitrate_in_kbps')), 'vcodec': src.get('video_codec'), 'fps': int_or_none(src.get('frame_rate')), 'abr': int_or_none(src.get('audio_bitrate_in_kbps')), 'acodec': src.get('audio_codec'), 'asr': int_or_none(src.get('audio_sample_rate')), 'tbr': int_or_none(src.get('total_bitrate_in_kbps')), 'filesize': int_or_none(src.get('file_size_in_bytes')), } outputs = asset.get('data', {}).get('outputs') if not isinstance(outputs, dict): outputs = {} def add_output_format_meta(f, key): output = outputs.get(key) if isinstance(output, dict): output_format = extract_output_format(output, key) output_format.update(f) return output_format 
return f def extract_formats(source_list): if not isinstance(source_list, list): return for source in source_list: video_url = url_or_none(source.get('file') or source.get('src')) if not video_url: continue if source.get('type') == 'application/x-mpegURL' or determine_ext(video_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue format_id = source.get('label') f = { 'url': video_url, 'format_id': '%sp' % format_id, 'height': int_or_none(format_id), } if format_id: # Some videos contain additional metadata (e.g. # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208) f = add_output_format_meta(f, format_id) formats.append(f) def extract_subtitles(track_list): if not isinstance(track_list, list): return for track in track_list: if not isinstance(track, dict): continue if track.get('kind') != 'captions': continue src = url_or_none(track.get('src')) if not src: continue lang = track.get('language') or track.get( 'srclang') or track.get('label') sub_dict = automatic_captions if track.get( 'autogenerated') is True else subtitles sub_dict.setdefault(lang, []).append({ 'url': src, }) for url_kind in ('download', 'stream'): urls = asset.get('%s_urls' % url_kind) if isinstance(urls, dict): extract_formats(urls.get('Video')) captions = asset.get('captions') if isinstance(captions, list): for cc in captions: if not isinstance(cc, dict): continue cc_url = url_or_none(cc.get('url')) if not cc_url: continue lang = try_get(cc, lambda x: x['locale']['locale'], compat_str) sub_dict = (automatic_captions if cc.get('source') == 'auto' else subtitles) sub_dict.setdefault(lang or 'en', []).append({ 'url': cc_url, }) view_html = lecture.get('view_html') if view_html: view_html_urls = set() for source in re.findall(r'<source[^>]+>', view_html): attributes = extract_attributes(source) src = attributes.get('src') if not src: continue res = attributes.get('data-res') height = 
int_or_none(res) if src in view_html_urls: continue view_html_urls.add(src) if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8': m3u8_formats = self._extract_m3u8_formats( src, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) for f in m3u8_formats: m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url']) if m: if not f.get('height'): f['height'] = int(m.group('height')) if not f.get('tbr'): f['tbr'] = int(m.group('tbr')) formats.extend(m3u8_formats) else: formats.append(add_output_format_meta({ 'url': src, 'format_id': '%dp' % height if height else None, 'height': height, }, res)) # react rendition since 2017.04.15 (see # https://github.com/ytdl-org/youtube-dl/issues/12744) data = self._parse_json( self._search_regex( r'videojs-setup-data=(["\'])(?P<data>{.+?})\1', view_html, 'setup data', default='{}', group='data'), video_id, transform_source=unescapeHTML, fatal=False) if data and isinstance(data, dict): extract_formats(data.get('sources')) if not duration: duration = int_or_none(data.get('duration')) extract_subtitles(data.get('tracks')) if not subtitles and not automatic_captions: text_tracks = self._parse_json( self._search_regex( r'text-tracks=(["\'])(?P<data>\[.+?\])\1', view_html, 'text tracks', default='{}', group='data'), video_id, transform_source=lambda s: js_to_json(unescapeHTML(s)), fatal=False) extract_subtitles(text_tracks) if not formats and outputs: for format_id, output in outputs.items(): f = extract_output_format(output, format_id) if f.get('url'): formats.append(f) self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, 'subtitles': subtitles, 'automatic_captions': automatic_captions, } class UdemyCourseIE(UdemyIE): IE_NAME = 'udemy:course' _VALID_URL = r'https?://(?:[^/]+\.)?udemy\.com/(?P<id>[^/?#&]+)' _TESTS 
= [{ 'url': 'https://www.udemy.com/java-tutorial/', 'only_matching': True, }, { 'url': 'https://wipro.udemy.com/java-tutorial/', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url) def _real_extract(self, url): course_path = self._match_id(url) webpage = self._download_webpage(url, course_path) course_id, title = self._extract_course_info(webpage, course_path) self._enroll_course(url, webpage, course_id) response = self._download_json( 'https://www.udemy.com/api-2.0/courses/%s/cached-subscriber-curriculum-items' % course_id, course_id, 'Downloading course curriculum', query={ 'fields[chapter]': 'title,object_index', 'fields[lecture]': 'title,asset', 'page_size': '1000', }) entries = [] chapter, chapter_number = [None] * 2 for entry in response['results']: clazz = entry.get('_class') if clazz == 'lecture': asset = entry.get('asset') if isinstance(asset, dict): asset_type = asset.get('asset_type') or asset.get('assetType') if asset_type != 'Video': continue lecture_id = entry.get('id') if lecture_id: entry = { '_type': 'url_transparent', 'url': 'https://www.udemy.com/%s/learn/v4/t/lecture/%s' % (course_path, entry['id']), 'title': entry.get('title'), 'ie_key': UdemyIE.ie_key(), } if chapter_number: entry['chapter_number'] = chapter_number if chapter: entry['chapter'] = chapter entries.append(entry) elif clazz == 'chapter': chapter_number = entry.get('object_index') chapter = entry.get('title') return self.playlist_result(entries, course_id, title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/udn.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
)
from ..compat import compat_urlparse


class UDNEmbedIE(InfoExtractor):
    """Extractor for embedded players on video.udn.com (聯合影音)."""
    IE_DESC = '聯合影音'
    # protocol-relative form is kept separate so other extractors can embed it
    _PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
    _VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL
    _TESTS = [{
        'url': 'http://video.udn.com/embed/news/300040',
        'info_dict': {
            'id': '300040',
            'ext': 'mp4',
            'title': '生物老師男變女 全校挺"做自己"',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['Failed to parse JSON Expecting value'],
    }, {
        'url': 'https://video.udn.com/embed/news/300040',
        'only_matching': True,
    }, {
        # From https://video.udn.com/news/303776
        'url': 'https://video.udn.com/play/news/303776',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract formats and metadata from a UDN embed page.

        The page carries a JS `options` object; when it parses as JSON the
        fields are read directly, otherwise individual fields are scraped
        with regexes from the raw JS source as a fallback.
        """
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)
        options_str = self._html_search_regex(
            r'var\s+options\s*=\s*([^;]+);', page, 'options')
        trans_options_str = js_to_json(options_str)
        # fatal=False: some pages ship JS that js_to_json cannot fully fix
        # (see the expected_warnings in _TESTS); fall back to regex scraping
        options = self._parse_json(trans_options_str, 'options', fatal=False) or {}
        if options:
            video_urls = options['video']
            title = options['title']
            poster = options.get('poster')
        else:
            # regex fallback over the (partially) JSON-ified options source
            video_urls = self._parse_json(self._html_search_regex(
                r'"video"\s*:\s*({.+?})\s*,', trans_options_str, 'video urls'), 'video urls')
            title = self._html_search_regex(
                r"title\s*:\s*'(.+?)'\s*,", options_str, 'title')
            poster = self._html_search_regex(
                r"poster\s*:\s*'(.+?)'\s*,", options_str, 'poster', default=None)

        # some embeds are just a wrapper around a YouTube upload — delegate
        if video_urls.get('youtube'):
            return self.url_result(video_urls.get('youtube'), 'Youtube')

        formats = []
        for video_type, api_url in video_urls.items():
            if not api_url:
                continue
            # each entry is an API endpoint whose response body is the
            # actual media URL (plain text), not a JSON document
            video_url = self._download_webpage(
                compat_urlparse.urljoin(url, api_url), video_id,
                note='retrieve url for %s video' % video_type)
            ext = determine_ext(video_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, ext='mp4', m3u8_id='hls'))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    video_url, video_id, f4m_id='hds'))
            else:
                mobj = re.search(r'_(?P<height>\d+)p_(?P<tbr>\d+)\.mp4', video_url)
                a_format = {
                    'url': video_url,
                    # video_type may be 'mp4', which confuses YoutubeDL
                    'format_id': 'http-' + video_type,
                }
                if mobj:
                    a_format.update({
                        'height': int_or_none(mobj.group('height')),
                        'tbr': int_or_none(mobj.group('tbr')),
                    })
                formats.append(a_format)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'thumbnail': poster,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ufctv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, parse_duration, parse_iso8601, urlencode_postdata, ) class UFCTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ufc\.tv/video/(?P<id>[^/]+)' _NETRC_MACHINE = 'ufctv' _TEST = { 'url': 'https://www.ufc.tv/video/ufc-219-countdown-full-episode', 'info_dict': { 'id': '34167', 'ext': 'mp4', 'title': 'UFC 219 Countdown: Full Episode', 'description': 'md5:26d4e8bf4665ae5878842d7050c3c646', 'timestamp': 1513962360, 'upload_date': '20171222', }, 'params': { # m3u8 download 'skip_download': True, } } def _real_initialize(self): username, password = self._get_login_info() if username is None: return code = self._download_json( 'https://www.ufc.tv/secure/authenticate', None, 'Logging in', data=urlencode_postdata({ 'username': username, 'password': password, 'format': 'json', })).get('code') if code and code != 'loginsuccess': raise ExtractorError(code, expected=True) def _real_extract(self, url): display_id = self._match_id(url) video_data = self._download_json(url, display_id, query={ 'format': 'json', }) video_id = str(video_data['id']) title = video_data['name'] m3u8_url = self._download_json( 'https://www.ufc.tv/service/publishpoint', video_id, query={ 'type': 'video', 'format': 'json', 'id': video_id, }, headers={ 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_1 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A402 Safari/604.1', })['path'] m3u8_url = m3u8_url.replace('_iphone.', '.') formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'duration': parse_duration(video_data.get('runtime')), 'timestamp': parse_iso8601(video_data.get('releaseDate')), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/uktvplay.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class UKTVPlayIE(InfoExtractor):
    """Extractor for uktvplay.uktv.co.uk; delegates to the Brightcove player."""
    _VALID_URL = r'https?://uktvplay\.uktv\.co\.uk/.+?\?.*?\bvideo=(?P<id>\d+)'
    _TEST = {
        'url': 'https://uktvplay.uktv.co.uk/shows/world-at-war/c/200/watch-online/?video=2117008346001',
        'md5': '',
        'info_dict': {
            'id': '2117008346001',
            'ext': 'mp4',
            'title': 'Pincers',
            'description': 'Pincers',
            'uploader_id': '1242911124001',
            'upload_date': '20130124',
            'timestamp': 1359049267,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download MPD manifest']
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1242911124001/H1xnMOqP_default/index.html?videoId=%s'

    def _real_extract(self, url):
        """Hand the Brightcove video id off to the BrightcoveNew extractor."""
        brightcove_id = self._match_id(url)
        player_url = self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id
        return self.url_result(player_url, 'BrightcoveNew', brightcove_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/umg.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_filesize,
    parse_iso8601,
)


class UMGDeIE(InfoExtractor):
    """Extractor for Universal Music Deutschland video pages."""
    IE_NAME = 'umg:de'
    IE_DESC = 'Universal Music Deutschland'
    _VALID_URL = r'https?://(?:www\.)?universal-music\.de/[^/]+/videos/[^/?#]+-(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.universal-music.de/sido/videos/jedes-wort-ist-gold-wert-457803',
        'md5': 'ebd90f48c80dcc82f77251eb1902634f',
        'info_dict': {
            'id': '457803',
            'ext': 'mp4',
            'title': 'Jedes Wort ist Gold wert',
            'timestamp': 1513591800,
            'upload_date': '20171218',
        }
    }

    def _real_extract(self, url):
        """Query the UMG GraphQL API and collect progressive + HLS formats."""
        video_id = self._match_id(url)
        video_data = self._download_json(
            'https://api.universal-music.de/graphql',
            video_id, query={
                'query': '''{
  universalMusic(channel:16) {
    video(id:%s) {
      headline
      formats {
        formatId
        url
        type
        width
        height
        mimeType
        fileSize
      }
      duration
      createdDate
    }
  }
}''' % video_id})['data']['universalMusic']['video']
        title = video_data['headline']
        # the HLS path encodes the video id one digit per directory level
        hls_url_template = 'http://mediadelivery.universal-music-services.de/vod/mp4:autofill/storage/' + '/'.join(list(video_id)) + '/content/%s/file/playlist.m3u8'
        thumbnails = []
        formats = []

        def add_m3u8_format(format_id):
            # FIX: fatal was the *string* 'False' (truthy), which made a
            # missing rendition abort extraction; pass the boolean False so
            # unavailable playlists are skipped as intended
            m3u8_formats = self._extract_m3u8_formats(
                hls_url_template % format_id, video_id, 'mp4',
                'm3u8_native', m3u8_id='hls', fatal=False)
            # renditions without a height are placeholder playlists — drop them
            if m3u8_formats and m3u8_formats[0].get('height'):
                formats.extend(m3u8_formats)

        for f in video_data.get('formats', []):
            f_url = f.get('url')
            mime_type = f.get('mimeType')
            # MXF masters are not downloadable media
            if not f_url or mime_type == 'application/mxf':
                continue
            fmt = {
                'url': f_url,
                'width': int_or_none(f.get('width')),
                'height': int_or_none(f.get('height')),
                'filesize': parse_filesize(f.get('fileSize')),
            }
            f_type = f.get('type')
            if f_type == 'Image':
                thumbnails.append(fmt)
            elif f_type == 'Video':
                format_id = f.get('formatId')
                if format_id:
                    fmt['format_id'] = format_id
                    if mime_type == 'video/mp4':
                        add_m3u8_format(format_id)
                    # probe the first byte: valid files start with 'F' (FLV)
                    # or a null byte (MP4 ftyp box length); skip error pages
                    urlh = self._request_webpage(f_url, video_id, fatal=False)
                    if urlh:
                        first_byte = urlh.read(1)
                        if first_byte not in (b'F', b'\x00'):
                            continue
                formats.append(fmt)
        if not formats:
            # known rendition ids used as a last-resort fallback
            for format_id in (867, 836, 940):
                add_m3u8_format(format_id)
        self._sort_formats(formats, ('width', 'height', 'filesize', 'tbr'))
        return {
            'id': video_id,
            'title': title,
            'duration': int_or_none(video_data.get('duration')),
            'timestamp': parse_iso8601(video_data.get('createdDate'), ' '),
            'thumbnails': thumbnails,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/unistra.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import qualities class UnistraIE(InfoExtractor): _VALID_URL = r'https?://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)' _TESTS = [ { 'url': 'http://utv.unistra.fr/video.php?id_video=154', 'md5': '736f605cfdc96724d55bb543ab3ced24', 'info_dict': { 'id': '154', 'ext': 'mp4', 'title': 'M!ss Yella', 'description': 'md5:104892c71bd48e55d70b902736b81bbf', }, }, { 'url': 'http://utv.unistra.fr/index.php?id_video=437', 'md5': '1ddddd6cccaae76f622ce29b8779636d', 'info_dict': { 'id': '437', 'ext': 'mp4', 'title': 'Prix Louise Weiss 2014', 'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a', }, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) files = set(re.findall(r'file\s*:\s*"(/[^"]+)"', webpage)) quality = qualities(['SD', 'HD']) formats = [] for file_path in files: format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD' formats.append({ 'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path, 'format_id': format_id, 'quality': quality(format_id) }) self._sort_formats(formats) title = self._html_search_regex( r'<title>UTV - (.*?)</', webpage, 'title') description = self._html_search_regex( r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL) thumbnail = self._search_regex( r'image: "(.*?)"', webpage, 'thumbnail') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/unity.py
from __future__ import unicode_literals

from .common import InfoExtractor
from .youtube import YoutubeIE


class UnityIE(InfoExtractor):
    """Extractor for unity3d.com tutorial pages, which embed YouTube videos."""
    _VALID_URL = r'https?://(?:www\.)?unity3d\.com/learn/tutorials/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://unity3d.com/learn/tutorials/topics/animation/animate-anything-mecanim',
        'info_dict': {
            'id': 'jWuNtik0C8E',
            'ext': 'mp4',
            'title': 'Live Training 22nd September 2014 -  Animate Anything',
            'description': 'md5:e54913114bd45a554c56cdde7669636e',
            'duration': 2893,
            'uploader': 'Unity',
            'uploader_id': 'Unity3D',
            'upload_date': '20140926',
        }
    }, {
        'url': 'https://unity3d.com/learn/tutorials/projects/2d-ufo-tutorial/following-player-camera?playlist=25844',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Resolve the tutorial page to its embedded YouTube video."""
        tutorial_id = self._match_id(url)
        page = self._download_webpage(url, tutorial_id)
        yt_id = self._search_regex(
            r'data-video-id="([_0-9a-zA-Z-]+)"', page, 'youtube ID')
        return self.url_result(
            yt_id, ie=YoutubeIE.ie_key(), video_id=tutorial_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/uol.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    int_or_none,
    parse_duration,
    update_url_query,
    str_or_none,
)


class UOLIE(InfoExtractor):
    """Extractor for uol.com.br videos (player, view and news URL shapes)."""
    IE_NAME = 'uol.com.br'
    _VALID_URL = r'https?://(?:.+?\.)?uol\.com\.br/.*?(?:(?:mediaId|v)=|view/(?:[a-z0-9]+/)?|video(?:=|/(?:\d{4}/\d{2}/\d{2}/)?))(?P<id>\d+|[\w-]+-[A-Z0-9]+)'
    _TESTS = [{
        'url': 'http://player.mais.uol.com.br/player_video_v3.swf?mediaId=15951931',
        'md5': '25291da27dc45e0afb5718a8603d3816',
        'info_dict': {
            'id': '15951931',
            'ext': 'mp4',
            'title': 'Miss simpatia é encontrada morta',
            'description': 'md5:3f8c11a0c0556d66daf7e5b45ef823b2',
        }
    }, {
        'url': 'http://tvuol.uol.com.br/video/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326',
        'md5': 'e41a2fb7b7398a3a46b6af37b15c00c9',
        'info_dict': {
            'id': '15954259',
            'ext': 'mp4',
            'title': 'Incêndio destrói uma das maiores casas noturnas de Londres',
            'description': 'Em Londres, um incêndio destruiu uma das maiores boates da cidade. Não há informações sobre vítimas.',
        }
    }, {
        'url': 'http://mais.uol.com.br/static/uolplayer/index.html?mediaId=15951931',
        'only_matching': True,
    }, {
        'url': 'http://mais.uol.com.br/view/15954259',
        'only_matching': True,
    }, {
        'url': 'http://noticias.band.uol.com.br/brasilurgente/video/2016/08/05/15951931/miss-simpatia-e-encontrada-morta.html',
        'only_matching': True,
    }, {
        'url': 'http://videos.band.uol.com.br/programa.asp?e=noticias&pr=brasil-urgente&v=15951931&t=Policia-desmonte-base-do-PCC-na-Cracolandia',
        'only_matching': True,
    }, {
        'url': 'http://mais.uol.com.br/view/cphaa0gl2x8r/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326',
        'only_matching': True,
    }, {
        'url': 'http://noticias.uol.com.br//videos/assistir.htm?video=rafaela-silva-inspira-criancas-no-judo-04024D983968D4C95326',
        'only_matching': True,
    }, {
        'url': 'http://mais.uol.com.br/view/e0qbgxid79uv/15275470',
        'only_matching': True,
    }]

    # known progressive format ids -> their resolutions (the API response
    # does not always carry width/height itself)
    _FORMATS = {
        '2': {
            'width': 640,
            'height': 360,
        },
        '5': {
            'width': 1280,
            'height': 720,
        },
        '6': {
            'width': 426,
            'height': 240,
        },
        '7': {
            'width': 1920,
            'height': 1080,
        },
        '8': {
            'width': 192,
            'height': 144,
        },
        '9': {
            'width': 568,
            'height': 320,
        },
        '11': {
            'width': 640,
            'height': 360,
        }
    }

    def _real_extract(self, url):
        """Resolve the URL to a numeric media id, then pull formats from the
        UOL player API, signing each format URL with token/sign query params.
        """
        video_id = self._match_id(url)

        media_id = None

        # numeric ids in the URL are already media ids
        if video_id.isdigit():
            media_id = video_id

        # otherwise try the embed endpoint, which mentions the media id
        if not media_id:
            embed_page = self._download_webpage(
                'https://jsuol.com.br/c/tv/uol/embed/?params=[embed,%s]' % video_id,
                video_id, 'Downloading embed page', fatal=False)
            if embed_page:
                media_id = self._search_regex(
                    (r'uol\.com\.br/(\d+)', r'mediaId=(\d+)'),
                    embed_page, 'media id', default=None)

        # last resort: scrape the page itself (fatal if still not found)
        if not media_id:
            webpage = self._download_webpage(url, video_id)
            media_id = self._search_regex(r'mediaId=(\d+)', webpage, 'media id')

        video_data = self._download_json(
            'http://mais.uol.com.br/apiuol/v3/player/getMedia/%s.json' % media_id,
            media_id)['item']
        title = video_data['title']

        query = {
            'ver': video_data.get('numRevision', 2),
            'r': 'http://mais.uol.com.br',
        }
        # token/sign authenticate the media URLs when present
        for k in ('token', 'sign'):
            v = video_data.get(k)
            if v:
                query[k] = v

        formats = []
        for f in video_data.get('formats', []):
            f_url = f.get('url') or f.get('secureUrl')
            if not f_url:
                continue
            f_url = update_url_query(f_url, query)
            format_id = str_or_none(f.get('id'))
            # format id 10 is an HLS master playlist, not a progressive file
            if format_id == '10':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
                continue
            fmt = {
                'format_id': format_id,
                'url': f_url,
                'source_preference': 1,
            }
            fmt.update(self._FORMATS.get(format_id, {}))
            formats.append(fmt)
        self._sort_formats(formats, ('height', 'width', 'source_preference', 'tbr', 'ext'))

        tags = []
        for tag in video_data.get('tags', []):
            tag_description = tag.get('description')
            if not tag_description:
                continue
            tags.append(tag_description)

        return {
            'id': media_id,
            'title': title,
            'description': clean_html(video_data.get('desMedia')),
            'thumbnail': video_data.get('thumbnail'),
            'duration': int_or_none(video_data.get('durationSeconds')) or parse_duration(video_data.get('duration')),
            'tags': tags,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/uplynk.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( float_or_none, ExtractorError, ) class UplynkIE(InfoExtractor): IE_NAME = 'uplynk' _VALID_URL = r'https?://.*?\.uplynk\.com/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.(?:m3u8|json)(?:.*?\bpbs=(?P<session_id>[^&]+))?' _TEST = { 'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8', 'info_dict': { 'id': 'e89eaf2ce9054aa89d92ddb2d817a52e', 'ext': 'mp4', 'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4', 'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa', }, 'params': { # m3u8 download 'skip_download': True, }, } def _extract_uplynk_info(self, uplynk_content_url): path, external_id, video_id, session_id = re.match(UplynkIE._VALID_URL, uplynk_content_url).groups() display_id = video_id or external_id formats = self._extract_m3u8_formats( 'http://content.uplynk.com/%s.m3u8' % path, display_id, 'mp4', 'm3u8_native') if session_id: for f in formats: f['extra_param_to_segment_url'] = 'pbs=' + session_id self._sort_formats(formats) asset = self._download_json('http://content.uplynk.com/player/assetinfo/%s.json' % path, display_id) if asset.get('error') == 1: raise ExtractorError('% said: %s' % (self.IE_NAME, asset['msg']), expected=True) return { 'id': asset['asset'], 'title': asset['desc'], 'thumbnail': asset.get('default_poster_url'), 'duration': float_or_none(asset.get('duration')), 'uploader_id': asset.get('owner'), 'formats': formats, } def _real_extract(self, url): return self._extract_uplynk_info(url) class UplynkPreplayIE(UplynkIE): IE_NAME = 'uplynk:preplay' _VALID_URL = r'https?://.*?\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json' _TEST = None def _real_extract(self, url): path, external_id, video_id = re.match(self._VALID_URL, url).groups() display_id = video_id or external_id preplay = self._download_json(url, display_id) content_url = 
'http://content.uplynk.com/%s.m3u8' % path session_id = preplay.get('sid') if session_id: content_url += '?pbs=' + session_id return self._extract_uplynk_info(content_url)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/urort.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse, ) from ..utils import ( unified_strdate, ) class UrortIE(InfoExtractor): IE_DESC = 'NRK P3 Urørt' _VALID_URL = r'https?://(?:www\.)?urort\.p3\.no/#!/Band/(?P<id>[^/]+)$' _TEST = { 'url': 'https://urort.p3.no/#!/Band/Gerilja', 'md5': '5ed31a924be8a05e47812678a86e127b', 'info_dict': { 'id': '33124-24', 'ext': 'mp3', 'title': 'The Bomb', 'thumbnail': r're:^https?://.+\.jpg', 'uploader': 'Gerilja', 'uploader_id': 'Gerilja', 'upload_date': '20100323', }, 'params': { 'matchtitle': '^The Bomb$', # To test, we want just one video } } def _real_extract(self, url): playlist_id = self._match_id(url) fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id) json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr songs = self._download_json(json_url, playlist_id) entries = [] for s in songs: formats = [{ 'tbr': f.get('Quality'), 'ext': f['FileType'], 'format_id': '%s-%s' % (f['FileType'], f.get('Quality', '')), 'url': 'http://p3urort.blob.core.windows.net/tracks/%s' % f['FileRef'], 'preference': 3 if f['FileType'] == 'mp3' else 2, } for f in s['Files']] self._sort_formats(formats) e = { 'id': '%d-%s' % (s['BandId'], s['$id']), 'title': s['Title'], 'uploader_id': playlist_id, 'uploader': s.get('BandName', playlist_id), 'thumbnail': 'http://urort.p3.no/cloud/images/%s' % s['Image'], 'upload_date': unified_strdate(s.get('Released')), 'formats': formats, } entries.append(e) return { '_type': 'playlist', 'id': playlist_id, 'title': playlist_id, 'entries': entries, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/urplay.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import unified_timestamp class URPlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ur(?:play|skola)\.se/(?:program|Produkter)/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://urplay.se/program/203704-ur-samtiden-livet-universum-och-rymdens-markliga-musik-om-vetenskap-kritiskt-tankande-och-motstand', 'md5': 'ff5b0c89928f8083c74bbd5099c9292d', 'info_dict': { 'id': '203704', 'ext': 'mp4', 'title': 'UR Samtiden - Livet, universum och rymdens märkliga musik : Om vetenskap, kritiskt tänkande och motstånd', 'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a', 'timestamp': 1513512768, 'upload_date': '20171217', }, }, { 'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde', 'info_dict': { 'id': '190031', 'ext': 'mp4', 'title': 'Tripp, Trapp, Träd : Sovkudde', 'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1', 'timestamp': 1440093600, 'upload_date': '20150820', }, }, { 'url': 'http://urskola.se/Produkter/155794-Smasagor-meankieli-Grodan-i-vida-varlden', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) urplayer_data = self._parse_json(self._search_regex( r'urPlayer\.init\(({.+?})\);', webpage, 'urplayer data'), video_id) host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect'] formats = [] for quality_attr, quality, preference in (('', 'sd', 0), ('_hd', 'hd', 1)): file_http = urplayer_data.get('file_http' + quality_attr) or urplayer_data.get('file_http_sub' + quality_attr) if file_http: formats.extend(self._extract_wowza_formats( 'http://%s/%splaylist.m3u8' % (host, file_http), video_id, skip_protocols=['rtmp', 'rtsp'])) self._sort_formats(formats) subtitles = {} for subtitle in urplayer_data.get('subtitles', []): subtitle_url = subtitle.get('file') kind = subtitle.get('kind') if not subtitle_url or (kind and kind != 
'captions'): continue subtitles.setdefault(subtitle.get('label', 'Svenska'), []).append({ 'url': subtitle_url, }) return { 'id': video_id, 'title': urplayer_data['title'], 'description': self._og_search_description(webpage), 'thumbnail': urplayer_data.get('image'), 'timestamp': unified_timestamp(self._html_search_meta(('uploadDate', 'schema:uploadDate'), webpage, 'timestamp')), 'series': urplayer_data.get('series_title'), 'subtitles': subtitles, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/usanetwork.py
# coding: utf-8 from __future__ import unicode_literals from .adobepass import AdobePassIE from ..utils import ( NO_DEFAULT, smuggle_url, update_url_query, ) class USANetworkIE(AdobePassIE): _VALID_URL = r'https?://(?:www\.)?usanetwork\.com/(?:[^/]+/videos|movies)/(?P<id>[^/?#]+)' _TEST = { 'url': 'http://www.usanetwork.com/mrrobot/videos/hpe-cybersecurity', 'md5': '33c0d2ba381571b414024440d08d57fd', 'info_dict': { 'id': '3086229', 'ext': 'mp4', 'title': 'HPE Cybersecurity', 'description': 'The more we digitize our world, the more vulnerable we are.', 'upload_date': '20160818', 'timestamp': 1471535460, 'uploader': 'NBCU-USA', }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) def _x(name, default=NO_DEFAULT): return self._search_regex( r'data-%s\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % name, webpage, name, default=default, group='value') video_id = _x('mpx-guid') title = _x('episode-title') mpx_account_id = _x('mpx-account-id', '2304992029') query = { 'mbr': 'true', } if _x('is-full-episode', None) == '1': query['manifest'] = 'm3u' if _x('is-entitlement', None) == '1': adobe_pass = {} drupal_settings = self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings', fatal=False) if drupal_settings: drupal_settings = self._parse_json(drupal_settings, video_id, fatal=False) if drupal_settings: adobe_pass = drupal_settings.get('adobePass', {}) resource = self._get_mvpd_resource( adobe_pass.get('adobePassResourceId', 'usa'), title, video_id, _x('episode-rating', 'TV-14')) query['auth'] = self._extract_mvpd_auth( url, video_id, adobe_pass.get('adobePassRequestorId', 'usa'), resource) info = self._search_json_ld(webpage, video_id, default={}) info.update({ '_type': 'url_transparent', 'url': smuggle_url(update_url_query( 'http://link.theplatform.com/s/HNK2IC/media/guid/%s/%s' % (mpx_account_id, video_id), query), {'force_smil_url': True}), 'id': video_id, 'title': 
title, 'series': _x('show-title', None), 'episode': title, 'ie_key': 'ThePlatform', }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/usatoday.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, get_element_by_attribute, parse_duration, try_get, update_url_query, ) from ..compat import compat_str class USATodayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?usatoday\.com/(?:[^/]+/)*(?P<id>[^?/#]+)' _TESTS = [{ # Brightcove Partner ID = 29906170001 'url': 'http://www.usatoday.com/media/cinematic/video/81729424/us-france-warn-syrian-regime-ahead-of-new-peace-talks/', 'md5': '033587d2529dc3411a1ab3644c3b8827', 'info_dict': { 'id': '4799374959001', 'ext': 'mp4', 'title': 'US, France warn Syrian regime ahead of new peace talks', 'timestamp': 1457891045, 'description': 'md5:7e50464fdf2126b0f533748d3c78d58f', 'uploader_id': '29906170001', 'upload_date': '20160313', } }, { # ui-video-data[asset_metadata][items][brightcoveaccount] = 28911775001 'url': 'https://www.usatoday.com/story/tech/science/2018/08/21/yellowstone-supervolcano-eruption-stop-worrying-its-blow/973633002/', 'info_dict': { 'id': '5824495846001', 'ext': 'mp4', 'title': 'Yellowstone more likely to crack rather than explode', 'timestamp': 1534790612, 'description': 'md5:3715e7927639a4f16b474e9391687c62', 'uploader_id': '28911775001', 'upload_date': '20180820', } }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(update_url_query(url, {'ajax': 'true'}), display_id) ui_video_data = get_element_by_attribute('class', 'ui-video-data', webpage) if not ui_video_data: raise ExtractorError('no video on the webpage', expected=True) video_data = self._parse_json(ui_video_data, display_id) item = try_get(video_data, lambda x: x['asset_metadata']['items'], dict) or {} return { '_type': 'url_transparent', 'url': self.BRIGHTCOVE_URL_TEMPLATE % (item.get('brightcoveaccount', '29906170001'), item.get('brightcoveid') or 
video_data['brightcove_id']), 'id': compat_str(video_data['id']), 'title': video_data['title'], 'thumbnail': video_data.get('thumbnail'), 'description': video_data.get('description'), 'duration': parse_duration(video_data.get('length')), 'ie_key': 'BrightcoveNew', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ustream.py
from __future__ import unicode_literals import random import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( encode_data_uri, ExtractorError, int_or_none, float_or_none, mimetype2ext, str_or_none, ) class UstreamIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)' IE_NAME = 'ustream' _TESTS = [{ 'url': 'http://www.ustream.tv/recorded/20274954', 'md5': '088f151799e8f572f84eb62f17d73e5c', 'info_dict': { 'id': '20274954', 'ext': 'flv', 'title': 'Young Americans for Liberty February 7, 2012 2:28 AM', 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM', 'timestamp': 1328577035, 'upload_date': '20120207', 'uploader': 'yaliberty', 'uploader_id': '6780869', }, }, { # From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444 # Title and uploader available only from params JSON 'url': 'http://www.ustream.tv/embed/recorded/59307601?ub=ff0000&lc=ff0000&oc=ffffff&uc=ffffff&v=3&wmode=direct', 'md5': '5a2abf40babeac9812ed20ae12d34e10', 'info_dict': { 'id': '59307601', 'ext': 'flv', 'title': '-CG11- Canada Games Figure Skating', 'uploader': 'sportscanadatv', }, 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.', }, { 'url': 'http://www.ustream.tv/embed/10299409', 'info_dict': { 'id': '10299409', }, 'playlist_count': 3, }, { 'url': 'http://www.ustream.tv/recorded/91343263', 'info_dict': { 'id': '91343263', 'ext': 'mp4', 'title': 'GitHub Universe - General Session - Day 1', 'upload_date': '20160914', 'description': 'GitHub Universe - General Session - Day 1', 'timestamp': 1473872730, 'uploader': 'wa0dnskeqkr', 'uploader_id': '38977840', }, 'params': { 'skip_download': True, # m3u8 download }, }] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage) if mobj is not None: return mobj.group('url') def 
_get_stream_info(self, url, video_id, app_id_ver, extra_note=None): def num_to_hex(n): return hex(n)[2:] rnd = random.randrange if not extra_note: extra_note = '' conn_info = self._download_json( 'http://r%d-1-%s-recorded-lp-live.ums.ustream.tv/1/ustream' % (rnd(1e8), video_id), video_id, note='Downloading connection info' + extra_note, query={ 'type': 'viewer', 'appId': app_id_ver[0], 'appVersion': app_id_ver[1], 'rsid': '%s:%s' % (num_to_hex(rnd(1e8)), num_to_hex(rnd(1e8))), 'rpin': '_rpin.%d' % rnd(1e15), 'referrer': url, 'media': video_id, 'application': 'recorded', }) host = conn_info[0]['args'][0]['host'] connection_id = conn_info[0]['args'][0]['connectionId'] return self._download_json( 'http://%s/1/ustream?connectionId=%s' % (host, connection_id), video_id, note='Downloading stream info' + extra_note) def _get_streams(self, url, video_id, app_id_ver): # Sometimes the return dict does not have 'stream' for trial_count in range(3): stream_info = self._get_stream_info( url, video_id, app_id_ver, extra_note=' (try %d)' % (trial_count + 1) if trial_count > 0 else '') if 'stream' in stream_info[0]['args'][0]: return stream_info[0]['args'][0]['stream'] return [] def _parse_segmented_mp4(self, dash_stream_info): def resolve_dash_template(template, idx, chunk_hash): return template.replace('%', compat_str(idx), 1).replace('%', chunk_hash) formats = [] for stream in dash_stream_info['streams']: # Use only one provider to avoid too many formats provider = dash_stream_info['providers'][0] fragments = [{ 'url': resolve_dash_template( provider['url'] + stream['initUrl'], 0, dash_stream_info['hashes']['0']) }] for idx in range(dash_stream_info['videoLength'] // dash_stream_info['chunkTime']): fragments.append({ 'url': resolve_dash_template( provider['url'] + stream['segmentUrl'], idx, dash_stream_info['hashes'][compat_str(idx // 10 * 10)]) }) content_type = stream['contentType'] kind = content_type.split('/')[0] f = { 'format_id': '-'.join(filter(None, [ 'dash', kind, 
str_or_none(stream.get('bitrate'))])), 'protocol': 'http_dash_segments', # TODO: generate a MPD doc for external players? 'url': encode_data_uri(b'<MPD/>', 'text/xml'), 'ext': mimetype2ext(content_type), 'height': stream.get('height'), 'width': stream.get('width'), 'fragments': fragments, } if kind == 'video': f.update({ 'vcodec': stream.get('codec'), 'acodec': 'none', 'vbr': stream.get('bitrate'), }) else: f.update({ 'vcodec': 'none', 'acodec': stream.get('codec'), 'abr': stream.get('bitrate'), }) formats.append(f) return formats def _real_extract(self, url): m = re.match(self._VALID_URL, url) video_id = m.group('id') # some sites use this embed format (see: https://github.com/ytdl-org/youtube-dl/issues/2990) if m.group('type') == 'embed/recorded': video_id = m.group('id') desktop_url = 'http://www.ustream.tv/recorded/' + video_id return self.url_result(desktop_url, 'Ustream') if m.group('type') == 'embed': video_id = m.group('id') webpage = self._download_webpage(url, video_id) content_video_ids = self._parse_json(self._search_regex( r'ustream\.vars\.offAirContentVideoIds=([^;]+);', webpage, 'content video IDs'), video_id) return self.playlist_result( map(lambda u: self.url_result('http://www.ustream.tv/recorded/' + u, 'Ustream'), content_video_ids), video_id) params = self._download_json( 'https://api.ustream.tv/videos/%s.json' % video_id, video_id) error = params.get('error') if error: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) video = params['video'] title = video['title'] filesize = float_or_none(video.get('file_size')) formats = [{ 'id': video_id, 'url': video_url, 'ext': format_id, 'filesize': filesize, } for format_id, video_url in video['media_urls'].items() if video_url] if not formats: hls_streams = self._get_streams(url, video_id, app_id_ver=(11, 2)) if hls_streams: # m3u8_native leads to intermittent ContentTooShortError formats.extend(self._extract_m3u8_formats( hls_streams[0]['url'], video_id, ext='mp4', 
m3u8_id='hls')) ''' # DASH streams handling is incomplete as 'url' is missing dash_streams = self._get_streams(url, video_id, app_id_ver=(3, 1)) if dash_streams: formats.extend(self._parse_segmented_mp4(dash_streams)) ''' self._sort_formats(formats) description = video.get('description') timestamp = int_or_none(video.get('created_at')) duration = float_or_none(video.get('length')) view_count = int_or_none(video.get('views')) uploader = video.get('owner', {}).get('username') uploader_id = video.get('owner', {}).get('id') thumbnails = [{ 'id': thumbnail_id, 'url': thumbnail_url, } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()] return { 'id': video_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'uploader': uploader, 'uploader_id': uploader_id, 'formats': formats, } class UstreamChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ustream\.tv/channel/(?P<slug>.+)' IE_NAME = 'ustream:channel' _TEST = { 'url': 'http://www.ustream.tv/channel/channeljapan', 'info_dict': { 'id': '10874166', }, 'playlist_mincount': 17, } def _real_extract(self, url): m = re.match(self._VALID_URL, url) display_id = m.group('slug') webpage = self._download_webpage(url, display_id) channel_id = self._html_search_meta('ustream:channel_id', webpage) BASE = 'http://www.ustream.tv' next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id video_ids = [] while next_url: reply = self._download_json( compat_urlparse.urljoin(BASE, next_url), display_id, note='Downloading video information (next: %d)' % (len(video_ids) + 1)) video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data'])) next_url = reply['nextUrl'] entries = [ self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream') for vid in video_ids] return { '_type': 'playlist', 'id': channel_id, 'display_id': display_id, 'entries': entries, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ustudio.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, unescapeHTML, ) class UstudioIE(InfoExtractor): IE_NAME = 'ustudio' _VALID_URL = r'https?://(?:(?:www|v1)\.)?ustudio\.com/video/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)' _TEST = { 'url': 'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge', 'md5': '58bbfca62125378742df01fc2abbdef6', 'info_dict': { 'id': 'Uxu2my9bgSph', 'display_id': 'san_francisco_golden_gate_bridge', 'ext': 'mp4', 'title': 'San Francisco: Golden Gate Bridge', 'description': 'md5:23925500697f2c6d4830e387ba51a9be', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20111107', 'uploader': 'Tony Farley', } } def _real_extract(self, url): video_id, display_id = re.match(self._VALID_URL, url).groups() config = self._download_xml( 'http://v1.ustudio.com/embed/%s/ustudio/config.xml' % video_id, display_id) def extract(kind): return [{ 'url': unescapeHTML(item.attrib['url']), 'width': int_or_none(item.get('width')), 'height': int_or_none(item.get('height')), } for item in config.findall('./qualities/quality/%s' % kind) if item.get('url')] formats = extract('video') self._sort_formats(formats) webpage = self._download_webpage(url, display_id) title = self._og_search_title(webpage) upload_date = unified_strdate(self._search_regex( r'(?s)Uploaded by\s*.+?\s*on\s*<span>([^<]+)</span>', webpage, 'upload date', fatal=False)) uploader = self._search_regex( r'Uploaded by\s*<a[^>]*>([^<]+)<', webpage, 'uploader', fatal=False) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': self._og_search_description(webpage), 'thumbnails': extract('image'), 'upload_date': upload_date, 'uploader': uploader, 'formats': formats, } class UstudioEmbedIE(InfoExtractor): IE_NAME = 'ustudio:embed' _VALID_URL = r'https?://(?:(?:app|embed)\.)?ustudio\.com/embed/(?P<uid>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 
'http://app.ustudio.com/embed/DeN7VdYRDKhP/Uw7G1kMCe65T', 'md5': '47c0be52a09b23a7f40de9469cec58f4', 'info_dict': { 'id': 'Uw7G1kMCe65T', 'ext': 'mp4', 'title': '5 Things IT Should Know About Video', 'description': 'md5:93d32650884b500115e158c5677d25ad', 'uploader_id': 'DeN7VdYRDKhP', } } def _real_extract(self, url): uploader_id, video_id = re.match(self._VALID_URL, url).groups() video_data = self._download_json( 'http://app.ustudio.com/embed/%s/%s/config.json' % (uploader_id, video_id), video_id)['videos'][0] title = video_data['name'] formats = [] for ext, qualities in video_data.get('transcodes', {}).items(): for quality in qualities: quality_url = quality.get('url') if not quality_url: continue height = int_or_none(quality.get('height')) formats.append({ 'format_id': '%s-%dp' % (ext, height) if height else ext, 'url': quality_url, 'width': int_or_none(quality.get('width')), 'height': height, }) self._sort_formats(formats) thumbnails = [] for image in video_data.get('images', []): image_url = image.get('url') if not image_url: continue thumbnails.append({ 'url': image_url, }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'uploader_id': uploader_id, 'tags': video_data.get('keywords'), 'thumbnails': thumbnails, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/varzesh3.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlparse, compat_parse_qs, ) from ..utils import ( clean_html, remove_start, ) class Varzesh3IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?video\.varzesh3\.com/(?:[^/]+/)+(?P<id>[^/]+)/?' _TESTS = [{ 'url': 'http://video.varzesh3.com/germany/bundesliga/5-%D9%88%D8%A7%DA%A9%D9%86%D8%B4-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AF%D8%B1%D9%88%D8%A7%D8%B2%D9%87%E2%80%8C%D8%A8%D8%A7%D9%86%D8%A7%D9%86%D8%9B%D9%87%D9%81%D8%AA%D9%87-26-%D8%A8%D9%88%D9%86%D8%AF%D8%B3/', 'md5': '2a933874cb7dce4366075281eb49e855', 'info_dict': { 'id': '76337', 'ext': 'mp4', 'title': '۵ واکنش برتر دروازه‌بانان؛هفته ۲۶ بوندسلیگا', 'description': 'فصل ۲۰۱۵-۲۰۱۴', 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'HTTP 404 Error', }, { 'url': 'http://video.varzesh3.com/video/112785/%D8%AF%D9%84%D9%87-%D8%B9%D9%84%DB%8C%D8%9B-%D8%B3%D8%AA%D8%A7%D8%B1%D9%87-%D9%86%D9%88%D8%B8%D9%87%D9%88%D8%B1-%D9%84%DB%8C%DA%AF-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AC%D8%B2%DB%8C%D8%B1%D9%87', 'md5': '841b7cd3afbc76e61708d94e53a4a4e7', 'info_dict': { 'id': '112785', 'ext': 'mp4', 'title': 'دله علی؛ ستاره نوظهور لیگ برتر جزیره', 'description': 'فوتبال 120', }, 'expected_warnings': ['description'], }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_url = self._search_regex( r'<source[^>]+src="([^"]+)"', webpage, 'video url') title = remove_start(self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title'), 'ویدیو ورزش 3 | ') description = self._html_search_regex( r'(?s)<div class="matn">(.+?)</div>', webpage, 'description', default=None) if description is None: description = clean_html(self._html_search_meta('description', webpage)) thumbnail = self._og_search_thumbnail(webpage, default=None) if thumbnail is None: fb_sharer_url = self._search_regex( r'<a[^>]+href="(https?://www\.facebook\.com/sharer/sharer\.php?[^"]+)"', 
webpage, 'facebook sharer URL', fatal=False) sharer_params = compat_parse_qs(compat_urllib_parse_urlparse(fb_sharer_url).query) thumbnail = sharer_params.get('p[images][0]', [None])[0] video_id = self._search_regex( r"<link[^>]+rel='(?:canonical|shortlink)'[^>]+href='/\?p=([^']+)'", webpage, display_id, default=None) if video_id is None: video_id = self._search_regex( r'var\s+VideoId\s*=\s*(\d+);', webpage, 'video id', default=display_id) return { 'url': video_url, 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vbox7.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class Vbox7IE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:[^/]+\.)?vbox7\.com/ (?: play:| (?: emb/external\.php| player/ext\.swf )\?.*?\bvid= ) (?P<id>[\da-fA-F]+) ''' _GEO_COUNTRIES = ['BG'] _TESTS = [{ 'url': 'http://vbox7.com/play:0946fff23c', 'md5': 'a60f9ab3a3a2f013ef9a967d5f7be5bf', 'info_dict': { 'id': '0946fff23c', 'ext': 'mp4', 'title': 'Борисов: Притеснен съм за бъдещето на България', 'description': 'По думите му е опасно страната ни да бъде обявена за "сигурна"', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1470982814, 'upload_date': '20160812', 'uploader': 'zdraveibulgaria', }, 'params': { 'proxy': '127.0.0.1:8118', }, }, { 'url': 'http://vbox7.com/play:249bb972c2', 'md5': '99f65c0c9ef9b682b97313e052734c3f', 'info_dict': { 'id': '249bb972c2', 'ext': 'mp4', 'title': 'Смях! Чудо - чист за секунди - Скрита камера', }, 'skip': 'georestricted', }, { 'url': 'http://vbox7.com/emb/external.php?vid=a240d20f9c&autoplay=1', 'only_matching': True, }, { 'url': 'http://i49.vbox7.com/player/ext.swf?vid=0946fff23c&autoplay=1', 'only_matching': True, }] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+src=(?P<q>["\'])(?P<url>(?:https?:)?//vbox7\.com/emb/external\.php.+?)(?P=q)', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) response = self._download_json( 'https://www.vbox7.com/ajax/video/nextvideo.php?vid=%s' % video_id, video_id) if 'error' in response: raise ExtractorError( '%s said: %s' % (self.IE_NAME, response['error']), expected=True) video = response['options'] title = video['title'] video_url = video['src'] if '/na.mp4' in video_url: self.raise_geo_restricted(countries=self._GEO_COUNTRIES) uploader = video.get('uploader') webpage = self._download_webpage( 'http://vbox7.com/play:%s' % video_id, video_id, fatal=None) info = {} if 
webpage: info = self._search_json_ld( webpage.replace('"/*@context"', '"@context"'), video_id, fatal=False) info.update({ 'id': video_id, 'title': title, 'url': video_url, 'uploader': uploader, 'thumbnail': self._proto_relative_url( info.get('thumbnail') or self._og_search_thumbnail(webpage), 'http:'), }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/veehd.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, compat_urlparse, ) from ..utils import ( ExtractorError, clean_html, get_element_by_id, ) class VeeHDIE(InfoExtractor): _VALID_URL = r'https?://veehd\.com/video/(?P<id>\d+)' # Seems VeeHD videos have multiple copies on several servers, all of # whom have different MD5 checksums, so omit md5 field in all tests _TESTS = [{ 'url': 'http://veehd.com/video/4639434_Solar-Sinter', 'info_dict': { 'id': '4639434', 'ext': 'mp4', 'title': 'Solar Sinter', 'uploader_id': 'VideoEyes', 'description': 'md5:46a840e8692ddbaffb5f81d9885cb457', }, 'skip': 'Video deleted', }, { 'url': 'http://veehd.com/video/4905758_Elysian-Fields-Channeling', 'info_dict': { 'id': '4905758', 'ext': 'mp4', 'title': 'Elysian Fields - Channeling', 'description': 'md5:360e4e95fdab58aefbea0f2a19e5604b', 'uploader_id': 'spotted', } }, { 'url': 'http://veehd.com/video/2046729_2012-2009-DivX-Trailer', 'info_dict': { 'id': '2046729', 'ext': 'avi', 'title': '2012 (2009) DivX Trailer', 'description': 'md5:75435ee95255e6a9838ac6f6f3a2396b', 'uploader_id': 'Movie_Trailers', } }] def _real_extract(self, url): video_id = self._match_id(url) # VeeHD seems to send garbage on the first request. 
# See https://github.com/ytdl-org/youtube-dl/issues/2102 self._download_webpage(url, video_id, 'Requesting webpage') webpage = self._download_webpage(url, video_id) if 'This video has been removed<' in webpage: raise ExtractorError('Video %s has been removed' % video_id, expected=True) player_path = self._search_regex( r'\$\("#playeriframe"\).attr\({src : "(.+?)"', webpage, 'player path') player_url = compat_urlparse.urljoin(url, player_path) self._download_webpage(player_url, video_id, 'Requesting player page') player_page = self._download_webpage( player_url, video_id, 'Downloading player page') video_url = None config_json = self._search_regex( r'value=\'config=({.+?})\'', player_page, 'config json', default=None) if config_json: config = json.loads(config_json) video_url = compat_urllib_parse_unquote(config['clip']['url']) if not video_url: video_url = self._html_search_regex( r'<embed[^>]+type="video/divx"[^>]+src="([^"]+)"', player_page, 'video url', default=None) if not video_url: iframe_src = self._search_regex( r'<iframe[^>]+src="/?([^"]+)"', player_page, 'iframe url') iframe_url = 'http://veehd.com/%s' % iframe_src self._download_webpage(iframe_url, video_id, 'Requesting iframe page') iframe_page = self._download_webpage( iframe_url, video_id, 'Downloading iframe page') video_url = self._search_regex( r"file\s*:\s*'([^']+)'", iframe_page, 'video url') title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0]) uploader_id = self._html_search_regex( r'<a href="/profile/\d+">(.+?)</a>', webpage, 'uploader') thumbnail = self._search_regex( r'<img id="veehdpreview" src="(.+?)"', webpage, 'thumbnail') description = self._html_search_regex( r'<td class="infodropdown".*?<div>(.*?)<ul', webpage, 'description', flags=re.DOTALL) return { '_type': 'video', 'id': video_id, 'title': title, 'url': video_url, 'uploader_id': uploader_id, 'thumbnail': thumbnail, 'description': description, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/veoh.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, qualities, ) class VeohIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?veoh\.com/(?:watch|embed|iphone/#_Watch)/(?P<id>(?:v|e|yapi-)[\da-zA-Z]+)' _TESTS = [{ 'url': 'http://www.veoh.com/watch/v56314296nk7Zdmz3', 'md5': '9e7ecc0fd8bbee7a69fe38953aeebd30', 'info_dict': { 'id': 'v56314296nk7Zdmz3', 'ext': 'mp4', 'title': 'Straight Backs Are Stronger', 'uploader': 'LUMOback', 'description': 'At LUMOback, we believe straight backs are stronger. The LUMOback Posture & Movement Sensor: It gently vibrates when you slouch, inspiring improved posture and mobility. Use the app to track your data and improve your posture over time. ', }, }, { 'url': 'http://www.veoh.com/embed/v56314296nk7Zdmz3', 'only_matching': True, }, { 'url': 'http://www.veoh.com/watch/v27701988pbTc4wzN?h1=Chile+workers+cover+up+to+avoid+skin+damage', 'md5': '4a6ff84b87d536a6a71e6aa6c0ad07fa', 'info_dict': { 'id': '27701988', 'ext': 'mp4', 'title': 'Chile workers cover up to avoid skin damage', 'description': 'md5:2bd151625a60a32822873efc246ba20d', 'uploader': 'afp-news', 'duration': 123, }, 'skip': 'This video has been deleted.', }, { 'url': 'http://www.veoh.com/watch/v69525809F6Nc4frX', 'md5': '4fde7b9e33577bab2f2f8f260e30e979', 'note': 'Embedded ooyala video', 'info_dict': { 'id': '69525809', 'ext': 'mp4', 'title': 'Doctors Alter Plan For Preteen\'s Weight Loss Surgery', 'description': 'md5:f5a11c51f8fb51d2315bca0937526891', 'uploader': 'newsy-videos', }, 'skip': 'This video has been deleted.', }, { 'url': 'http://www.veoh.com/watch/e152215AJxZktGS', 'only_matching': True, }] def _extract_video(self, source): return { 'id': source.get('videoId'), 'title': source.get('title'), 'description': source.get('description'), 'thumbnail': source.get('highResImage') or source.get('medResImage'), 'uploader': source.get('username'), 'duration': int_or_none(source.get('length')), 
'view_count': int_or_none(source.get('views')), 'age_limit': 18 if source.get('isMature') == 'true' or source.get('isSexy') == 'true' else 0, 'formats': self._extract_formats(source), } def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://www.veoh.com/watch/getVideo/' + video_id, video_id)['video'] title = video['title'] thumbnail_url = None q = qualities(['HQ', 'Regular']) formats = [] for f_id, f_url in video.get('src', {}).items(): if not f_url: continue if f_id == 'poster': thumbnail_url = f_url else: formats.append({ 'format_id': f_id, 'quality': q(f_id), 'url': f_url, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': thumbnail_url, 'uploader': video.get('author', {}).get('nickname'), 'duration': int_or_none(video.get('lengthBySec')) or parse_duration(video.get('length')), 'view_count': int_or_none(video.get('views')), 'formats': formats, 'average_rating': int_or_none(video.get('rating')), 'comment_count': int_or_none(video.get('numOfComments')), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vesti.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError from .rutv import RUTVIE class VestiIE(InfoExtractor): IE_DESC = 'Вести.Ru' _VALID_URL = r'https?://(?:.+?\.)?vesti\.ru/(?P<id>.+)' _TESTS = [ { 'url': 'http://www.vesti.ru/videos?vid=575582&cid=1', 'info_dict': { 'id': '765035', 'ext': 'mp4', 'title': 'Вести.net: биткоины в России не являются законными', 'description': 'md5:d4bb3859dc1177b28a94c5014c35a36b', 'duration': 302, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.vesti.ru/doc.html?id=1349233', 'info_dict': { 'id': '773865', 'ext': 'mp4', 'title': 'Участники митинга штурмуют Донецкую областную администрацию', 'description': 'md5:1a160e98b3195379b4c849f2f4958009', 'duration': 210, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.vesti.ru/only_video.html?vid=576180', 'info_dict': { 'id': '766048', 'ext': 'mp4', 'title': 'США заморозило, Британию затопило', 'description': 'md5:f0ed0695ec05aed27c56a70a58dc4cc1', 'duration': 87, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://hitech.vesti.ru/news/view/id/4000', 'info_dict': { 'id': '766888', 'ext': 'mp4', 'title': 'Вести.net: интернет-гиганты начали перетягивание программных "одеял"', 'description': 'md5:65ddd47f9830c4f42ed6475f8730c995', 'duration': 279, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://sochi2014.vesti.ru/video/index/video_id/766403', 'info_dict': { 'id': '766403', 'ext': 'mp4', 'title': 'XXII зимние Олимпийские игры. Российские хоккеисты стартовали на Олимпиаде с победы', 'description': 'md5:55805dfd35763a890ff50fa9e35e31b3', 'duration': 271, }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Blocked outside Russia', }, { 'url': 'http://sochi2014.vesti.ru/live/play/live_id/301', 'info_dict': { 'id': '51499', 'ext': 'flv', 'title': 'Сочи-2014. Биатлон. Индивидуальная гонка. 
Мужчины ', 'description': 'md5:9e0ed5c9d2fa1efbfdfed90c9a6d179c', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Translation has finished' }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') page = self._download_webpage(url, video_id, 'Downloading page') mobj = re.search( r'<meta[^>]+?property="og:video"[^>]+?content="http://www\.vesti\.ru/i/flvplayer_videoHost\.swf\?vid=(?P<id>\d+)', page) if mobj: video_id = mobj.group('id') page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id, 'Downloading video page') rutv_url = RUTVIE._extract_url(page) if rutv_url: return self.url_result(rutv_url, 'RUTV') raise ExtractorError('No video found', expected=True)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vevo.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, compat_HTTPError, ) from ..utils import ( ExtractorError, int_or_none, parse_iso8601, ) class VevoBaseIE(InfoExtractor): def _extract_json(self, webpage, video_id): return self._parse_json( self._search_regex( r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>', webpage, 'initial store'), video_id) class VevoIE(VevoBaseIE): ''' Accepts urls from vevo.com or in the format 'vevo:{id}' (currently used by MTVIE and MySpaceIE) ''' _VALID_URL = r'''(?x) (?:https?://(?:www\.)?vevo\.com/watch/(?!playlist|genre)(?:[^/]+/(?:[^/]+/)?)?| https?://cache\.vevo\.com/m/html/embed\.html\?video=| https?://videoplayer\.vevo\.com/embed/embedded\?videoId=| https?://embed\.vevo\.com/.*?[?&]isrc=| vevo:) (?P<id>[^&?#]+)''' _TESTS = [{ 'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280', 'md5': '95ee28ee45e70130e3ab02b0f579ae23', 'info_dict': { 'id': 'GB1101300280', 'ext': 'mp4', 'title': 'Hurts - Somebody to Die For', 'timestamp': 1372057200, 'upload_date': '20130624', 'uploader': 'Hurts', 'track': 'Somebody to Die For', 'artist': 'Hurts', 'genre': 'Pop', }, 'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'], }, { 'note': 'v3 SMIL format', 'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923', 'md5': 'f6ab09b034f8c22969020b042e5ac7fc', 'info_dict': { 'id': 'USUV71302923', 'ext': 'mp4', 'title': 'Cassadee Pope - I Wish I Could Break Your Heart', 'timestamp': 1392796919, 'upload_date': '20140219', 'uploader': 'Cassadee Pope', 'track': 'I Wish I Could Break Your Heart', 'artist': 'Cassadee Pope', 'genre': 'Country', }, 'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'], }, { 'note': 'Age-limited video', 'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282', 'info_dict': { 'id': 
'USRV81300282', 'ext': 'mp4', 'title': 'Justin Timberlake - Tunnel Vision (Explicit)', 'age_limit': 18, 'timestamp': 1372888800, 'upload_date': '20130703', 'uploader': 'Justin Timberlake', 'track': 'Tunnel Vision (Explicit)', 'artist': 'Justin Timberlake', 'genre': 'Pop', }, 'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'], }, { 'note': 'No video_info', 'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000', 'md5': '8b83cc492d72fc9cf74a02acee7dc1b0', 'info_dict': { 'id': 'USUV71503000', 'ext': 'mp4', 'title': 'K Camp ft. T.I. - Till I Die', 'age_limit': 18, 'timestamp': 1449468000, 'upload_date': '20151207', 'uploader': 'K Camp', 'track': 'Till I Die', 'artist': 'K Camp', 'genre': 'Hip-Hop', }, 'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'], }, { 'note': 'Featured test', 'url': 'https://www.vevo.com/watch/lemaitre/Wait/USUV71402190', 'md5': 'd28675e5e8805035d949dc5cf161071d', 'info_dict': { 'id': 'USUV71402190', 'ext': 'mp4', 'title': 'Lemaitre ft. 
LoLo - Wait', 'age_limit': 0, 'timestamp': 1413432000, 'upload_date': '20141016', 'uploader': 'Lemaitre', 'track': 'Wait', 'artist': 'Lemaitre', 'genre': 'Electronic', }, 'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'], }, { 'note': 'Only available via webpage', 'url': 'http://www.vevo.com/watch/GBUV71600656', 'md5': '67e79210613865b66a47c33baa5e37fe', 'info_dict': { 'id': 'GBUV71600656', 'ext': 'mp4', 'title': 'ABC - Viva Love', 'age_limit': 0, 'timestamp': 1461830400, 'upload_date': '20160428', 'uploader': 'ABC', 'track': 'Viva Love', 'artist': 'ABC', 'genre': 'Pop', }, 'expected_warnings': ['Failed to download video versions info'], }, { # no genres available 'url': 'http://www.vevo.com/watch/INS171400764', 'only_matching': True, }, { # Another case available only via the webpage; using streams/streamsV3 formats # Geo-restricted to Netherlands/Germany 'url': 'http://www.vevo.com/watch/boostee/pop-corn-clip-officiel/FR1A91600909', 'only_matching': True, }, { 'url': 'https://embed.vevo.com/?isrc=USH5V1923499&partnerId=4d61b777-8023-4191-9ede-497ed6c24647&partnerAdCode=', 'only_matching': True, }] _VERSIONS = { 0: 'youtube', # only in AuthenticateVideo videoVersions 1: 'level3', 2: 'akamai', 3: 'level3', 4: 'amazon', } def _initialize_api(self, video_id): webpage = self._download_webpage( 'https://accounts.vevo.com/token', None, note='Retrieving oauth token', errnote='Unable to retrieve oauth token', data=json.dumps({ 'client_id': 'SPupX1tvqFEopQ1YS6SS', 'grant_type': 'urn:vevo:params:oauth:grant-type:anonymous', }).encode('utf-8'), headers={ 'Content-Type': 'application/json', }) if re.search(r'(?i)THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION', webpage): self.raise_geo_restricted( '%s said: This page is currently unavailable in your region' % self.IE_NAME) auth_info = self._parse_json(webpage, video_id) self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['legacy_token'] def _call_api(self, 
path, *args, **kwargs): try: data = self._download_json(self._api_url_template % path, *args, **kwargs) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError): errors = self._parse_json(e.cause.read().decode(), None)['errors'] error_message = ', '.join([error['message'] for error in errors]) raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) raise return data def _real_extract(self, url): video_id = self._match_id(url) self._initialize_api(video_id) video_info = self._call_api( 'video/%s' % video_id, video_id, 'Downloading api video info', 'Failed to download video info') video_versions = self._call_api( 'video/%s/streams' % video_id, video_id, 'Downloading video versions info', 'Failed to download video versions info', fatal=False) # Some videos are only available via webpage (e.g. # https://github.com/ytdl-org/youtube-dl/issues/9366) if not video_versions: webpage = self._download_webpage(url, video_id) json_data = self._extract_json(webpage, video_id) if 'streams' in json_data.get('default', {}): video_versions = json_data['default']['streams'][video_id][0] else: video_versions = [ value for key, value in json_data['apollo']['data'].items() if key.startswith('%s.streams' % video_id)] uploader = None artist = None featured_artist = None artists = video_info.get('artists') for curr_artist in artists: if curr_artist.get('role') == 'Featured': featured_artist = curr_artist['name'] else: artist = uploader = curr_artist['name'] formats = [] for video_version in video_versions: version = self._VERSIONS.get(video_version.get('version'), 'generic') version_url = video_version.get('url') if not version_url: continue if '.ism' in version_url: continue elif '.mpd' in version_url: formats.extend(self._extract_mpd_formats( version_url, video_id, mpd_id='dash-%s' % version, note='Downloading %s MPD information' % version, errnote='Failed to download %s MPD information' % version, fatal=False)) elif '.m3u8' in version_url: 
formats.extend(self._extract_m3u8_formats( version_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls-%s' % version, note='Downloading %s m3u8 information' % version, errnote='Failed to download %s m3u8 information' % version, fatal=False)) else: m = re.search(r'''(?xi) _(?P<width>[0-9]+)x(?P<height>[0-9]+) _(?P<vcodec>[a-z0-9]+) _(?P<vbr>[0-9]+) _(?P<acodec>[a-z0-9]+) _(?P<abr>[0-9]+) \.(?P<ext>[a-z0-9]+)''', version_url) if not m: continue formats.append({ 'url': version_url, 'format_id': 'http-%s-%s' % (version, video_version['quality']), 'vcodec': m.group('vcodec'), 'acodec': m.group('acodec'), 'vbr': int(m.group('vbr')), 'abr': int(m.group('abr')), 'ext': m.group('ext'), 'width': int(m.group('width')), 'height': int(m.group('height')), }) self._sort_formats(formats) track = video_info['title'] if featured_artist: artist = '%s ft. %s' % (artist, featured_artist) title = '%s - %s' % (artist, track) if artist else track genres = video_info.get('genres') genre = ( genres[0] if genres and isinstance(genres, list) and isinstance(genres[0], compat_str) else None) is_explicit = video_info.get('isExplicit') if is_explicit is True: age_limit = 18 elif is_explicit is False: age_limit = 0 else: age_limit = None return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'), 'timestamp': parse_iso8601(video_info.get('releaseDate')), 'uploader': uploader, 'duration': int_or_none(video_info.get('duration')), 'view_count': int_or_none(video_info.get('views', {}).get('total')), 'age_limit': age_limit, 'track': track, 'artist': uploader, 'genre': genre, } class VevoPlaylistIE(VevoBaseIE): _VALID_URL = r'https?://(?:www\.)?vevo\.com/watch/(?P<kind>playlist|genre)/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29', 'info_dict': { 'id': 'dadbf4e7-b99f-4184-9670-6f0e547b6a29', 'title': 'Best-Of: Birdman', }, 'playlist_count': 10, }, { 'url': 
'http://www.vevo.com/watch/genre/rock', 'info_dict': { 'id': 'rock', 'title': 'Rock', }, 'playlist_count': 20, }, { 'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29?index=0', 'md5': '32dcdfddddf9ec6917fc88ca26d36282', 'info_dict': { 'id': 'USCMV1100073', 'ext': 'mp4', 'title': 'Birdman - Y.U. MAD', 'timestamp': 1323417600, 'upload_date': '20111209', 'uploader': 'Birdman', 'track': 'Y.U. MAD', 'artist': 'Birdman', 'genre': 'Rap/Hip-Hop', }, 'expected_warnings': ['Unable to download SMIL file'], }, { 'url': 'http://www.vevo.com/watch/genre/rock?index=0', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) playlist_id = mobj.group('id') playlist_kind = mobj.group('kind') webpage = self._download_webpage(url, playlist_id) qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) index = qs.get('index', [None])[0] if index: video_id = self._search_regex( r'<meta[^>]+content=(["\'])vevo://video/(?P<id>.+?)\1[^>]*>', webpage, 'video id', default=None, group='id') if video_id: return self.url_result('vevo:%s' % video_id, VevoIE.ie_key()) playlists = self._extract_json(webpage, playlist_id)['default']['%ss' % playlist_kind] playlist = (list(playlists.values())[0] if playlist_kind == 'playlist' else playlists[playlist_id]) entries = [ self.url_result('vevo:%s' % src, VevoIE.ie_key()) for src in playlist['isrcs']] return self.playlist_result( entries, playlist.get('playlistId') or playlist_id, playlist.get('name'), playlist.get('description'))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vgtv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .xstream import XstreamIE from ..utils import ( ExtractorError, float_or_none, try_get, ) class VGTVIE(XstreamIE): IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet' _GEO_BYPASS = False _HOST_TO_APPNAME = { 'vgtv.no': 'vgtv', 'bt.no/tv': 'bttv', 'aftenbladet.no/tv': 'satv', 'fvn.no/fvntv': 'fvntv', 'aftenposten.no/webtv': 'aptv', 'ap.vgtv.no/webtv': 'aptv', 'tv.aftonbladet.se/abtv': 'abtv', 'www.aftonbladet.se/tv': 'abtv', } _APP_NAME_TO_VENDOR = { 'vgtv': 'vgtv', 'bttv': 'bt', 'satv': 'sa', 'fvntv': 'fvn', 'aptv': 'ap', 'abtv': 'ab', } _VALID_URL = r'''(?x) (?:https?://(?:www\.)? (?P<host> %s ) /? (?: (?:\#!/)?(?:video|live)/| embed?.*id=| a(?:rticles)?/ )| (?P<appname> %s ):) (?P<id>\d+) ''' % ('|'.join(_HOST_TO_APPNAME.keys()), '|'.join(_APP_NAME_TO_VENDOR.keys())) _TESTS = [ { # streamType: vod 'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu', 'md5': 'b8be7a234cebb840c0d512c78013e02f', 'info_dict': { 'id': '84196', 'ext': 'mp4', 'title': 'Hevnen er søt: Episode 10 - Abu', 'description': 'md5:e25e4badb5f544b04341e14abdc72234', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 648.000, 'timestamp': 1404626400, 'upload_date': '20140706', 'view_count': int, }, }, { # streamType: wasLive 'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen', 'info_dict': { 'id': '100764', 'ext': 'flv', 'title': 'OPPTAK: VGTV følger EM-kvalifiseringen', 'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 9103.0, 'timestamp': 1410113864, 'upload_date': '20140907', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Video is no longer available', }, { # streamType: wasLive 'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla', 'info_dict': { 'id': '113063', 'ext': 'mp4', 'title': 'V75 fra Solvalla 30.05.15', 'description': 
'md5:b3743425765355855f88e096acc93231', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 25966, 'timestamp': 1432975582, 'upload_date': '20150530', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more', 'md5': 'fd828cd29774a729bf4d4425fe192972', 'info_dict': { 'id': '21039', 'ext': 'mp4', 'title': 'TRAILER: «SWEATSHOP» - I can´t take any more', 'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238', 'duration': 66, 'timestamp': 1417002452, 'upload_date': '20141126', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien', 'only_matching': True, }, { 'url': 'http://ap.vgtv.no/webtv#!/video/111084/de-nye-bysyklene-lettere-bedre-gir-stoerre-hjul-og-feste-til-mobil', 'only_matching': True, }, { # geoblocked 'url': 'http://www.vgtv.no/#!/video/127205/inside-the-mind-of-favela-funk', 'only_matching': True, }, { 'url': 'http://tv.aftonbladet.se/abtv/articles/36015', 'only_matching': True, }, { 'url': 'https://www.aftonbladet.se/tv/a/36015', 'only_matching': True, }, { 'url': 'abtv:140026', 'only_matching': True, }, { 'url': 'http://www.vgtv.no/video/84196/hevnen-er-soet-episode-10-abu', 'only_matching': True, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') host = mobj.group('host') appname = self._HOST_TO_APPNAME[host] if host else mobj.group('appname') vendor = self._APP_NAME_TO_VENDOR[appname] data = self._download_json( 'http://svp.vg.no/svp/api/v1/%s/assets/%s?appName=%s-website' % (vendor, video_id, appname), video_id, 'Downloading media JSON') if data.get('status') == 'inactive': raise ExtractorError( 'Video %s is no longer available' % video_id, expected=True) info = { 'formats': [], } if len(video_id) == 5: if appname == 'bttv': info = 
self._extract_video_info('btno', video_id) streams = data['streamUrls'] stream_type = data.get('streamType') is_live = stream_type == 'live' formats = [] hls_url = streams.get('hls') if hls_url: formats.extend(self._extract_m3u8_formats( hls_url, video_id, 'mp4', entry_protocol='m3u8' if is_live else 'm3u8_native', m3u8_id='hls', fatal=False)) hds_url = streams.get('hds') if hds_url: hdcore_sign = 'hdcore=3.7.0' f4m_formats = self._extract_f4m_formats( hds_url + '?%s' % hdcore_sign, video_id, f4m_id='hds', fatal=False) if f4m_formats: for entry in f4m_formats: # URLs without the extra param induce an 404 error entry.update({'extra_param_to_segment_url': hdcore_sign}) formats.append(entry) mp4_urls = streams.get('pseudostreaming') or [] mp4_url = streams.get('mp4') if mp4_url: mp4_urls.append(mp4_url) for mp4_url in mp4_urls: format_info = { 'url': mp4_url, } mobj = re.search(r'(\d+)_(\d+)_(\d+)', mp4_url) if mobj: tbr = int(mobj.group(3)) format_info.update({ 'width': int(mobj.group(1)), 'height': int(mobj.group(2)), 'tbr': tbr, 'format_id': 'mp4-%s' % tbr, }) formats.append(format_info) info['formats'].extend(formats) if not info['formats']: properties = try_get( data, lambda x: x['streamConfiguration']['properties'], list) if properties and 'geoblocked' in properties: raise self.raise_geo_restricted( countries=[host.rpartition('.')[-1].partition('/')[0].upper()]) self._sort_formats(info['formats']) info.update({ 'id': video_id, 'title': self._live_title(data['title']) if is_live else data['title'], 'description': data['description'], 'thumbnail': data['images']['main'] + '?t[]=900x506q80', 'timestamp': data['published'], 'duration': float_or_none(data['duration'], 1000), 'view_count': data['displays'], 'is_live': is_live, }) return info class BTArticleIE(InfoExtractor): IE_NAME = 'bt:article' IE_DESC = 'Bergens Tidende Articles' _VALID_URL = r'https?://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html' _TEST = { 'url': 
'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html', 'md5': '2acbe8ad129b3469d5ae51b1158878df', 'info_dict': { 'id': '23199', 'ext': 'mp4', 'title': 'Alrekstad internat', 'description': 'md5:dc81a9056c874fedb62fc48a300dac58', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 191, 'timestamp': 1289991323, 'upload_date': '20101117', 'view_count': int, }, } def _real_extract(self, url): webpage = self._download_webpage(url, self._match_id(url)) video_id = self._search_regex( r'<video[^>]+data-id="(\d+)"', webpage, 'video id') return self.url_result('bttv:%s' % video_id, 'VGTV') class BTVestlendingenIE(InfoExtractor): IE_NAME = 'bt:vestlendingen' IE_DESC = 'Bergens Tidende - Vestlendingen' _VALID_URL = r'https?://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588', 'md5': 'd7d17e3337dc80de6d3a540aefbe441b', 'info_dict': { 'id': '86588', 'ext': 'mov', 'title': 'Otto Wollertsen', 'description': 'Vestlendingen Otto Fredrik Wollertsen', 'timestamp': 1430473209, 'upload_date': '20150501', }, 'skip': '404 Error', }, { 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86255', 'md5': 'a2893f8632e96389f4bdf36aa9463ceb', 'info_dict': { 'id': '86255', 'ext': 'mov', 'title': 'Du må tåle å fryse og være sulten', 'description': 'md5:b8046f4d022d5830ddab04865791d063', 'upload_date': '20150321', 'timestamp': 1426942023, }, }] def _real_extract(self, url): return self.url_result('bttv:%s' % self._match_id(url), 'VGTV')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vh1.py
# coding: utf-8 from __future__ import unicode_literals from .mtv import MTVServicesInfoExtractor class VH1IE(MTVServicesInfoExtractor): IE_NAME = 'vh1.com' _FEED_URL = 'http://www.vh1.com/feeds/mrss/' _TESTS = [{ 'url': 'http://www.vh1.com/episodes/0umwpq/hip-hop-squares-kent-jones-vs-nick-young-season-1-ep-120', 'info_dict': { 'title': 'Kent Jones vs. Nick Young', 'description': 'Come to Play. Stay to Party. With Mike Epps, TIP, O’Shea Jackson Jr., T-Pain, Tisha Campbell-Martin and more.', }, 'playlist_mincount': 4, }, { # Clip 'url': 'http://www.vh1.com/video-clips/t74mif/scared-famous-scared-famous-extended-preview', 'info_dict': { 'id': '0a50c2d2-a86b-4141-9565-911c7e2d0b92', 'ext': 'mp4', 'title': 'Scared Famous|October 9, 2017|1|NO-EPISODE#|Scared Famous + Extended Preview', 'description': 'md5:eff5551a274c473a29463de40f7b09da', 'upload_date': '20171009', 'timestamp': 1507574700, }, 'params': { # m3u8 download 'skip_download': True, }, }] _VALID_URL = r'https?://(?:www\.)?vh1\.com/(?:video-clips|episodes)/(?P<id>[^/?#.]+)' def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) mgid = self._extract_triforce_mgid(webpage) videos_info = self._get_videos_info(mgid) return videos_info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vice.py
# coding: utf-8 from __future__ import unicode_literals import re import time import hashlib import json import random from .adobepass import AdobePassIE from .youtube import YoutubeIE from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, int_or_none, parse_age_limit, str_or_none, try_get, ) class ViceIE(AdobePassIE): IE_NAME = 'vice' _VALID_URL = r'https?://(?:(?:video|vms)\.vice|(?:www\.)?viceland)\.com/(?P<locale>[^/]+)/(?:video/[^/]+|embed)/(?P<id>[\da-f]+)' _TESTS = [{ 'url': 'https://video.vice.com/en_us/video/pet-cremator/58c69e38a55424f1227dc3f7', 'info_dict': { 'id': '5e647f0125e145c9aef2069412c0cbde', 'ext': 'mp4', 'title': '10 Questions You Always Wanted To Ask: Pet Cremator', 'description': 'md5:fe856caacf61fe0e74fab15ce2b07ca5', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1489664942, 'upload_date': '20170316', 'age_limit': 14, }, 'params': { # m3u8 download 'skip_download': True, }, 'add_ie': ['UplynkPreplay'], }, { # geo restricted to US 'url': 'https://video.vice.com/en_us/video/the-signal-from-tolva/5816510690b70e6c5fd39a56', 'info_dict': { 'id': '930c0ad1f47141cc955087eecaddb0e2', 'ext': 'mp4', 'uploader': 'waypoint', 'title': 'The Signal From Tölva', 'description': 'md5:3927e3c79f9e8094606a2b3c5b5e55d5', 'uploader_id': '57f7d621e05ca860fa9ccaf9', 'timestamp': 1477941983, 'upload_date': '20161031', }, 'params': { # m3u8 download 'skip_download': True, }, 'add_ie': ['UplynkPreplay'], }, { 'url': 'https://video.vice.com/alps/video/ulfs-wien-beruchtigste-grafitti-crew-part-1/581b12b60a0e1f4c0fb6ea2f', 'info_dict': { 'id': '581b12b60a0e1f4c0fb6ea2f', 'ext': 'mp4', 'title': 'ULFs - Wien berüchtigste Grafitti Crew - Part 1', 'description': '<p>Zwischen Hinterzimmer-Tattoos und U-Bahnschächten erzählen uns die Ulfs, wie es ist, "süchtig nach Sachbeschädigung" zu sein.</p>', 'uploader': 'VICE', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 
1485368119, 'upload_date': '20170125', 'age_limit': 14, }, 'params': { # AES-encrypted m3u8 'skip_download': True, 'proxy': '127.0.0.1:8118', }, 'add_ie': ['UplynkPreplay'], }, { 'url': 'https://video.vice.com/en_us/video/pizza-show-trailer/56d8c9a54d286ed92f7f30e4', 'only_matching': True, }, { 'url': 'https://video.vice.com/en_us/embed/57f41d3556a0a80f54726060', 'only_matching': True, }, { 'url': 'https://vms.vice.com/en_us/video/preplay/58c69e38a55424f1227dc3f7', 'only_matching': True, }, { 'url': 'https://www.viceland.com/en_us/video/thursday-march-1-2018/5a8f2d7ff1cdb332dd446ec1', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe\b[^>]+\bsrc=["\']((?:https?:)?//video\.vice\.com/[^/]+/embed/[\da-f]+)', webpage) @staticmethod def _extract_url(webpage): urls = ViceIE._extract_urls(webpage) return urls[0] if urls else None def _real_extract(self, url): locale, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage( 'https://video.vice.com/%s/embed/%s' % (locale, video_id), video_id) video = self._parse_json( self._search_regex( r'PREFETCH_DATA\s*=\s*({.+?})\s*;\s*\n', webpage, 'app state'), video_id)['video'] video_id = video.get('vms_id') or video.get('id') or video_id title = video['title'] is_locked = video.get('locked') rating = video.get('rating') thumbnail = video.get('thumbnail_url') duration = int_or_none(video.get('duration')) series = try_get( video, lambda x: x['episode']['season']['show']['title'], compat_str) episode_number = try_get( video, lambda x: x['episode']['episode_number']) season_number = try_get( video, lambda x: x['episode']['season']['season_number']) uploader = None query = {} if is_locked: resource = self._get_mvpd_resource( 'VICELAND', title, video_id, rating) query['tvetoken'] = self._extract_mvpd_auth( url, video_id, 'VICELAND', resource) # signature generation algorithm is reverse engineered from signatureGenerator in # 
webpack:///../shared/~/vice-player/dist/js/vice-player.js in # https://www.viceland.com/assets/common/js/web.vendor.bundle.js # new JS is located here https://vice-web-statics-cdn.vice.com/vice-player/player-embed.js exp = int(time.time()) + 1440 query.update({ 'exp': exp, 'sign': hashlib.sha512(('%s:GET:%d' % (video_id, exp)).encode()).hexdigest(), '_ad_blocked': None, '_ad_unit': '', '_debug': '', 'platform': 'desktop', 'rn': random.randint(10000, 100000), 'fbprebidtoken': '', }) try: preplay = self._download_json( 'https://vms.vice.com/%s/video/preplay/%s' % (locale, video_id), video_id, query=query) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401): error = json.loads(e.cause.read().decode()) error_message = error.get('error_description') or error['details'] raise ExtractorError('%s said: %s' % ( self.IE_NAME, error_message), expected=True) raise video_data = preplay['video'] base = video_data['base'] uplynk_preplay_url = preplay['preplayURL'] episode = video_data.get('episode', {}) channel = video_data.get('channel', {}) subtitles = {} cc_url = preplay.get('ccURL') if cc_url: subtitles['en'] = [{ 'url': cc_url, }] return { '_type': 'url_transparent', 'url': uplynk_preplay_url, 'id': video_id, 'title': title, 'description': base.get('body') or base.get('display_body'), 'thumbnail': thumbnail, 'duration': int_or_none(video_data.get('video_duration')) or duration, 'timestamp': int_or_none(video_data.get('created_at'), 1000), 'age_limit': parse_age_limit(video_data.get('video_rating')), 'series': video_data.get('show_title') or series, 'episode_number': int_or_none(episode.get('episode_number') or episode_number), 'episode_id': str_or_none(episode.get('id') or video_data.get('episode_id')), 'season_number': int_or_none(season_number), 'season_id': str_or_none(episode.get('season_id')), 'uploader': channel.get('base', {}).get('title') or channel.get('name') or uploader, 'uploader_id': str_or_none(channel.get('id')), 
'subtitles': subtitles, 'ie_key': 'UplynkPreplay', } class ViceShowIE(InfoExtractor): IE_NAME = 'vice:show' _VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?:[^/]+/)?show/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://munchies.vice.com/en/show/fuck-thats-delicious-2', 'info_dict': { 'id': 'fuck-thats-delicious-2', 'title': "Fuck, That's Delicious", 'description': 'Follow the culinary adventures of rapper Action Bronson during his ongoing world tour.', }, 'playlist_count': 17, } def _real_extract(self, url): show_id = self._match_id(url) webpage = self._download_webpage(url, show_id) entries = [ self.url_result(video_url, ViceIE.ie_key()) for video_url, _ in re.findall( r'<h2[^>]+class="article-title"[^>]+data-id="\d+"[^>]*>\s*<a[^>]+href="(%s.*?)"' % ViceIE._VALID_URL, webpage)] title = self._search_regex( r'<title>(.+?)</title>', webpage, 'title', default=None) if title: title = re.sub(r'(.+)\s*\|\s*.+$', r'\1', title).strip() description = self._html_search_meta( 'description', webpage, 'description') return self.playlist_result(entries, show_id, title, description) class ViceArticleIE(InfoExtractor): IE_NAME = 'vice:article' _VALID_URL = r'https://www\.vice\.com/[^/]+/article/(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://www.vice.com/en_us/article/on-set-with-the-woman-making-mormon-porn-in-utah', 'info_dict': { 'id': '41eae2a47b174a1398357cec55f1f6fc', 'ext': 'mp4', 'title': 'Mormon War on Porn ', 'description': 'md5:6394a8398506581d0346b9ab89093fef', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1491883129, 'upload_date': '20170411', 'age_limit': 17, }, 'params': { # AES-encrypted m3u8 'skip_download': True, }, 'add_ie': ['UplynkPreplay'], }, { 'url': 'https://www.vice.com/en_us/article/how-to-hack-a-car', 'md5': '7fe8ebc4fa3323efafc127b82bd821d9', 'info_dict': { 'id': '3jstaBeXgAs', 'ext': 'mp4', 'title': 'How to Hack a Car: Phreaked Out (Episode 2)', 'description': 'md5:ee95453f7ff495db8efe14ae8bf56f30', 'uploader': 'Motherboard', 
'uploader_id': 'MotherboardTV', 'upload_date': '20140529', }, 'add_ie': ['Youtube'], }, { 'url': 'https://www.vice.com/en_us/article/znm9dx/karley-sciortino-slutever-reloaded', 'md5': 'a7ecf64ee4fa19b916c16f4b56184ae2', 'info_dict': { 'id': 'e2ed435eb67e43efb66e6ef9a6930a88', 'ext': 'mp4', 'title': "Making The World's First Male Sex Doll", 'description': 'md5:916078ef0e032d76343116208b6cc2c4', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1476919911, 'upload_date': '20161019', 'age_limit': 17, }, 'params': { 'skip_download': True, }, 'add_ie': [ViceIE.ie_key()], }, { 'url': 'https://www.vice.com/en_us/article/cowboy-capitalists-part-1', 'only_matching': True, }, { 'url': 'https://www.vice.com/ru/article/big-night-out-ibiza-clive-martin-229', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) prefetch_data = self._parse_json(self._search_regex( r'__APP_STATE\s*=\s*({.+?})(?:\s*\|\|\s*{}\s*)?;\s*\n', webpage, 'app state'), display_id)['pageData'] body = prefetch_data['body'] def _url_res(video_url, ie_key): return { '_type': 'url_transparent', 'url': video_url, 'display_id': display_id, 'ie_key': ie_key, } vice_url = ViceIE._extract_url(webpage) if vice_url: return _url_res(vice_url, ViceIE.ie_key()) embed_code = self._search_regex( r'embedCode=([^&\'"]+)', body, 'ooyala embed code', default=None) if embed_code: return _url_res('ooyala:%s' % embed_code, 'Ooyala') youtube_url = YoutubeIE._extract_url(body) if youtube_url: return _url_res(youtube_url, YoutubeIE.ie_key()) video_url = self._html_search_regex( r'data-video-url="([^"]+)"', prefetch_data['embed_code'], 'video URL') return _url_res(video_url, ViceIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/vidbit.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( int_or_none, js_to_json, remove_end, unified_strdate, ) class VidbitIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?vidbit\.co/(?:watch|embed)\?.*?\bv=(?P<id>[\da-zA-Z]+)' _TESTS = [{ 'url': 'http://www.vidbit.co/watch?v=jkL2yDOEq2', 'md5': '1a34b7f14defe3b8fafca9796892924d', 'info_dict': { 'id': 'jkL2yDOEq2', 'ext': 'mp4', 'title': 'Intro to VidBit', 'description': 'md5:5e0d6142eec00b766cbf114bfd3d16b7', 'thumbnail': r're:https?://.*\.jpg$', 'upload_date': '20160618', 'view_count': int, 'comment_count': int, } }, { 'url': 'http://www.vidbit.co/embed?v=jkL2yDOEq2&auto=0&water=0', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( compat_urlparse.urljoin(url, '/watch?v=%s' % video_id), video_id) video_url, title = [None] * 2 config = self._parse_json(self._search_regex( r'(?s)\.setup\(({.+?})\);', webpage, 'setup', default='{}'), video_id, transform_source=js_to_json) if config: if config.get('file'): video_url = compat_urlparse.urljoin(url, config['file']) title = config.get('title') if not video_url: video_url = compat_urlparse.urljoin(url, self._search_regex( r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video URL', group='url')) if not title: title = remove_end( self._html_search_regex( (r'<h1>(.+?)</h1>', r'<title>(.+?)</title>'), webpage, 'title', default=None) or self._og_search_title(webpage), ' - VidBit') description = self._html_search_meta( ('description', 'og:description', 'twitter:description'), webpage, 'description') upload_date = unified_strdate(self._html_search_meta( 'datePublished', webpage, 'upload date')) view_count = int_or_none(self._search_regex( r'<strong>(\d+)</strong> views', webpage, 'view count', fatal=False)) comment_count = int_or_none(self._search_regex( r'id=["\']cmt_num["\'][^>]*>\((\d+)\)', webpage, 'comment count', 
fatal=False)) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': self._og_search_thumbnail(webpage), 'upload_date': upload_date, 'view_count': view_count, 'comment_count': comment_count, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/viddler.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    int_or_none,
)


class ViddlerIE(InfoExtractor):
    # Extractor for viddler.com; an optional 'secret' query parameter
    # unlocks secret-protected videos (second capture group below).
    _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?'
    _TESTS = [{
        'url': 'http://www.viddler.com/v/43903784',
        'md5': '9eee21161d2c7f5b39690c3e325fab2f',
        'info_dict': {
            'id': '43903784',
            'ext': 'mov',
            'title': 'Video Made Easy',
            'description': 'md5:6a697ebd844ff3093bd2e82c37b409cd',
            'uploader': 'viddler',
            'timestamp': 1335371429,
            'upload_date': '20120425',
            'duration': 100.89,
            'thumbnail': r're:^https?://.*\.jpg$',
            'view_count': int,
            'comment_count': int,
            'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
        }
    }, {
        'url': 'http://www.viddler.com/v/4d03aad9/',
        'md5': 'f12c5a7fa839c47a79363bfdf69404fb',
        'info_dict': {
            'id': '4d03aad9',
            'ext': 'ts',
            'title': 'WALL-TO-GORTAT',
            'upload_date': '20150126',
            'uploader': 'deadspin',
            'timestamp': 1422285291,
            'view_count': int,
            'comment_count': int,
        }
    }, {
        'url': 'http://www.viddler.com/player/221ebbbd/0/',
        'md5': '740511f61d3d1bb71dc14a0fe01a1c10',
        'info_dict': {
            'id': '221ebbbd',
            'ext': 'mov',
            'title': 'LETeens-Grammar-snack-third-conditional',
            'description': ' ',
            'upload_date': '20140929',
            'uploader': 'BCLETeens',
            'timestamp': 1411997190,
            'view_count': int,
            'comment_count': int,
        }
    }, {
        # secret protected
        'url': 'http://www.viddler.com/v/890c0985?secret=34051570',
        'info_dict': {
            'id': '890c0985',
            'ext': 'mp4',
            'title': 'Complete Property Training - Traineeships',
            'description': ' ',
            'upload_date': '20130606',
            'uploader': 'TiffanyBowtell',
            'timestamp': 1370496993,
            'view_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        # Pull both the id and the optional secret out of the URL.
        video_id, secret = re.match(self._VALID_URL, url).groups()

        query = {
            'video_id': video_id,
            'key': 'v0vhrt7bg2xq1vyxhkct',  # public API key used by the site player
        }
        if secret:
            query['secret'] = secret

        # Playback details come from the official v2 JSON API; the Referer
        # header is required for protected videos to be served.
        data = self._download_json(
            'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json',
            video_id, headers={'Referer': url}, query=query)['video']

        formats = []
        for filed in data['files']:
            # Skip transcodes that are not finished yet.
            if filed.get('status', 'ready') != 'ready':
                continue

            format_id = filed.get('profile_id') or filed['profile_name']
            # Base format entry; CDN/HTML5 variants below are copies of it
            # with only the URL, id suffix and preference swapped.
            f = {
                'format_id': format_id,
                'format_note': filed['profile_name'],
                'url': self._proto_relative_url(filed['url']),
                'width': int_or_none(filed.get('width')),
                'height': int_or_none(filed.get('height')),
                'filesize': int_or_none(filed.get('size')),
                'ext': filed.get('ext'),
                'source_preference': -1,
            }
            formats.append(f)

            # CDN-hosted copy is preferred (source_preference 1).
            if filed.get('cdn_url'):
                f = f.copy()
                f['url'] = self._proto_relative_url(filed['cdn_url'], 'http:')
                f['format_id'] = format_id + '-cdn'
                f['source_preference'] = 1
                formats.append(f)

            # HTML5-compatible source ranks between CDN and the base URL.
            if filed.get('html5_video_source'):
                f = f.copy()
                f['url'] = self._proto_relative_url(filed['html5_video_source'])
                f['format_id'] = format_id + '-html5'
                f['source_preference'] = 0
                formats.append(f)
        self._sort_formats(formats)

        categories = [
            t.get('text') for t in data.get('tags', [])
            if 'text' in t]

        return {
            'id': video_id,
            'title': data['title'],
            'formats': formats,
            'description': data.get('description'),
            'timestamp': int_or_none(data.get('upload_time')),
            'thumbnail': self._proto_relative_url(data.get('thumbnail_url')),
            'uploader': data.get('author'),
            'duration': float_or_none(data.get('length')),
            'view_count': int_or_none(data.get('view_count')),
            'comment_count': int_or_none(data.get('comment_count')),
            'categories': categories,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/videa.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    mimetype2ext,
    parse_codecs,
    xpath_element,
    xpath_text,
)


class VideaIE(InfoExtractor):
    # Extractor for videa.hu / videakid.hu video pages and player URLs.
    _VALID_URL = r'''(?x)
                    https?://
                        videa(?:kid)?\.hu/
                        (?:
                            videok/(?:[^/]+/)*[^?#&]+-|
                            player\?.*?\bv=|
                            player/v/
                        )
                        (?P<id>[^?#&]+)
                    '''
    _TESTS = [{
        'url': 'http://videa.hu/videok/allatok/az-orult-kigyasz-285-kigyot-kigyo-8YfIAjxwWGwT8HVQ',
        'md5': '97a7af41faeaffd9f1fc864a7c7e7603',
        'info_dict': {
            'id': '8YfIAjxwWGwT8HVQ',
            'ext': 'mp4',
            'title': 'Az őrült kígyász 285 kígyót enged szabadon',
            'thumbnail': r're:^https?://.*',
            'duration': 21,
        },
    }, {
        'url': 'http://videa.hu/videok/origo/jarmuvek/supercars-elozes-jAHDWfWSJH5XuFhH',
        'only_matching': True,
    }, {
        'url': 'http://videa.hu/player?v=8YfIAjxwWGwT8HVQ',
        'only_matching': True,
    }, {
        'url': 'http://videa.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1',
        'only_matching': True,
    }, {
        'url': 'https://videakid.hu/videok/origo/jarmuvek/supercars-elozes-jAHDWfWSJH5XuFhH',
        'only_matching': True,
    }, {
        'url': 'https://videakid.hu/player?v=8YfIAjxwWGwT8HVQ',
        'only_matching': True,
    }, {
        'url': 'https://videakid.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find embedded videa.hu player iframes in third-party pages
        # (used by the generic extractor).
        return [url for _, url in re.findall(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//videa\.hu/player\?.*?\bv=.+?)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # All metadata comes from the site's XML player endpoint.
        info = self._download_xml(
            'http://videa.hu/videaplayer_get_xml.php', video_id,
            query={'v': video_id})

        # Both elements are mandatory — no video/sources means nothing to extract.
        video = xpath_element(info, './/video', 'video', fatal=True)
        sources = xpath_element(info, './/video_sources', 'sources', fatal=True)

        title = xpath_text(video, './title', fatal=True)

        formats = []
        for source in sources.findall('./video_source'):
            source_url = source.text
            if not source_url:
                continue
            # Start from codec info parsed out of the 'codecs' attribute,
            # then layer the per-source fields on top.
            f = parse_codecs(source.get('codecs'))
            f.update({
                'url': source_url,
                'ext': mimetype2ext(source.get('mimetype')) or 'mp4',
                'format_id': source.get('name'),
                'width': int_or_none(source.get('width')),
                'height': int_or_none(source.get('height')),
            })
            formats.append(f)
        self._sort_formats(formats)

        thumbnail = xpath_text(video, './poster_src')
        duration = int_or_none(xpath_text(video, './duration'))

        # is_adult_content: '1' -> 18+, any other non-empty value -> 0,
        # element absent -> age limit unknown (None).
        age_limit = None
        is_adult = xpath_text(video, './is_adult_content', default=None)
        if is_adult:
            age_limit = 18 if is_adult == '1' else 0

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/videodetective.py
from __future__ import unicode_literals from .common import InfoExtractor from .internetvideoarchive import InternetVideoArchiveIE class VideoDetectiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?videodetective\.com/[^/]+/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.videodetective.com/movies/kick-ass-2/194487', 'info_dict': { 'id': '194487', 'ext': 'mp4', 'title': 'Kick-Ass 2', 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) query = 'customerid=69249&publishedid=' + video_id return self.url_result( InternetVideoArchiveIE._build_json_url(query), ie=InternetVideoArchiveIE.ie_key())
[]
[]
[]