Fixed downloading of unrelated videos when downloading a YouTube playlist

cryzed 2012-03-09 22:05:35 +01:00
parent 9c228928b6
commit 7151f63a5f


@@ -766,7 +766,7 @@ class FileDownloader(object):
            raise MaxDownloadsReached()
        filename = self.prepare_filename(info_dict)
        # Forced printings
        if self.params.get('forcetitle', False):
            print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
@@ -842,7 +842,7 @@ class FileDownloader(object):
            except (ContentTooShortError, ), err:
                self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return
        if success:
            try:
                self.post_process(filename, info_dict)
@@ -1183,7 +1183,7 @@ class YoutubeIE(InfoExtractor):
        '43': '360x640',
        '44': '480x854',
        '45': '720x1280',
    }
    IE_NAME = u'youtube'
    def report_lang(self):
@@ -2519,7 +2519,7 @@ class YoutubePlaylistIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-   _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+   _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
    _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
    _youtube_ie = None
    IE_NAME = u'youtube:playlist'
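
Note (not part of the diff): a minimal sketch of why the old pattern also pulled in unrelated videos. The page snippet and playlist id below are made up; the old expression matched every /watch?v= link on the playlist page, sidebar suggestions included, while the new template only matches links carrying the playlist's own list=PL<id> parameter.

    import re

    # Made-up page snippet and playlist id, for illustration only.
    page = ('<a href="/watch?v=abc123&list=PL46A43BB8A4E8C164&index=1">playlist entry</a>'
            '<a href="/watch?v=zzz999&feature=related">suggested (unrelated) video</a>')
    playlist_id = '46A43BB8A4E8C164'

    old_pattern = r'/watch\?v=(.+?)&'                          # matches any watch link on the page
    new_pattern = r'/watch\?v=(.+?)&list=PL%s&' % playlist_id  # anchored to this playlist's id

    print(re.findall(old_pattern, page))  # ['abc123', 'zzz999'] -- the unrelated video sneaks in
    print(re.findall(new_pattern, page))  # ['abc123'] -- only entries of this playlist
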
@@ -2571,7 +2571,8 @@ class YoutubePlaylistIE(InfoExtractor):
            # Extract video identifiers
            ids_in_page = []
-           for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+           video_indicator = self._VIDEO_INDICATOR_TEMPLATE % playlist_id
+           for mobj in re.finditer(video_indicator, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))
            video_ids.extend(ids_in_page)
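
Note (not part of the diff): a self-contained sketch of the per-page extraction step above, with the template filled in for a given playlist id; the membership check keeps the ids in page order while skipping repeated links (a plain set would lose that order).

    import re

    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'

    def extract_ids_in_page(page, playlist_id):
        # Fill the template for this playlist, then collect ids in page order,
        # skipping links that were already seen on this page.
        video_indicator = _VIDEO_INDICATOR_TEMPLATE % playlist_id
        ids_in_page = []
        for mobj in re.finditer(video_indicator, page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page
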
@@ -2582,7 +2583,11 @@ class YoutubePlaylistIE(InfoExtractor):
        playliststart = self._downloader.params.get('playliststart', 1) - 1
        playlistend = self._downloader.params.get('playlistend', -1)
-       video_ids = video_ids[playliststart:playlistend]
+       if playlistend == -1:
+           video_ids = video_ids[playliststart:]
+       else:
+           video_ids = video_ids[playliststart:playlistend]
        for id in video_ids:
            self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
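
Note (not part of the diff): the replaced one-liner silently dropped the last playlist entry whenever playlistend kept its default of -1, since a Python slice treats -1 as up-to-but-not-including the last element. A tiny illustration with made-up ids:

    video_ids = ['a', 'b', 'c', 'd']        # made-up ids
    playliststart, playlistend = 0, -1      # defaults: start at the beginning, no explicit end

    print(video_ids[playliststart:playlistend])  # ['a', 'b', 'c'] -- last entry lost

    # The replacement above treats -1 as "no upper bound" instead:
    if playlistend == -1:
        selected = video_ids[playliststart:]
    else:
        selected = video_ids[playliststart:playlistend]
    print(selected)                              # ['a', 'b', 'c', 'd']
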
@@ -3028,14 +3033,14 @@ class BlipTVIE(InfoExtractor):
                data = json_data['Post']
            else:
                data = json_data
            upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
            video_url = data['media']['url']
            umobj = re.match(self._URL_EXT, video_url)
            if umobj is None:
                raise ValueError('Can not determine filename extension')
            ext = umobj.group(1)
            info = {
                'id': data['item_id'],
                'url': video_url,
@@ -3069,7 +3074,7 @@ class MyVideoIE(InfoExtractor):
    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
@@ -3136,7 +3141,7 @@ class ComedyCentralIE(InfoExtractor):
    def report_extraction(self, episode_id):
        self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
    def report_config_download(self, episode_id):
        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
@@ -3563,7 +3568,7 @@ class SoundcloudIE(InfoExtractor):
        mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
        if mobj:
            description = mobj.group(1)
        # upload date
        upload_date = None
        mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
@@ -3698,7 +3703,7 @@ class MixcloudIE(InfoExtractor):
            url_list = jsonData[fmt][bitrate]
        except TypeError: # we have no bitrate info.
            url_list = jsonData[fmt]
        return url_list
    def check_urls(self, url_list):
@@ -3818,7 +3823,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
        info = {
            'id': _simplify_title(course + '_' + video),
        }
        self.report_extraction(info['id'])
        baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
        xmlUrl = baseUrl + video + '.xml'
@@ -3952,7 +3957,7 @@ class MTVIE(InfoExtractor):
            self._downloader.trouble(u'ERROR: unable to extract performer')
            return
        performer = _unescapeHTML(mobj.group(1).decode('iso-8859-1'))
        video_title = performer + ' - ' + song_name
        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
        if mobj is None:
@@ -4194,7 +4199,7 @@ def updateSelf(downloader, filename):
    try:
        urlh = urllib.urlopen(UPDATE_URL)
        newcontent = urlh.read()
        vmatch = re.search("__version__ = '([^']+)'", newcontent)
        if vmatch is not None and vmatch.group(1) == __version__:
            downloader.to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
@@ -4615,7 +4620,7 @@ def _real_main():
            parser.error(u'you must provide at least one URL')
        else:
            sys.exit()
    try:
        retcode = fd.download(all_urls)
    except MaxDownloadsReached: