diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 452caeade..8a5067dd9 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1153,6 +1153,7 @@ from .southpark import (
from .spankbang import (
SpankBangIE,
SpankBangPlaylistIE,
+ SpankBangListIE,
)
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
diff --git a/youtube_dl/extractor/spankbang.py b/youtube_dl/extractor/spankbang.py
index b3bff8ba1..e76f9ead3 100644
--- a/youtube_dl/extractor/spankbang.py
+++ b/youtube_dl/extractor/spankbang.py
@@ -5,6 +5,7 @@ import itertools
import re
from .common import InfoExtractor
+from ..compat import compat_urlparse
from ..utils import (
determine_ext,
extract_attributes,
@@ -183,6 +184,7 @@ class SpankBangPlaylistIE(InfoExtractor):
'info_dict': {
'id': 'ug0k',
'title': 'Big Ass Titties',
+ 'description': 'md5:65b01bb13a9276cf172a67a41304bafd',
},
'playlist_mincount': 35,
}, {
@@ -191,43 +193,152 @@ class SpankBangPlaylistIE(InfoExtractor):
'info_dict': {
'id': '51wxk',
'title': 'Dance',
+ 'description': 'md5:7aae6991c65d561a9319ecab31f857e2',
},
'playlist_mincount': 60,
}]
+    def _entries(self, url, playlist_id, webpage=None):
+        for ii in itertools.count(1):
+            if not webpage:
+                webpage = self._download_webpage(
+                    url, playlist_id,
+                    note='Downloading playlist page %d' % (ii, ),
+                    fatal=False)
+            if not webpage:
+                break
+            # search <a class="...thumb..." ...> ... </a> innerHTML
+            for mobj in re.finditer(
+                    r'''<a[^>]*?\bclass\s*=\s*('|")(?:(?:(?!\1).)+?\s)?\s*thumb\b[^>]*>''',
+                    get_element_by_id('container', webpage) or webpage):
+                item_url = extract_attributes(mobj.group(0)).get('href')
+                if item_url:
+                    yield urljoin(url, item_url)
+            next_url = self._search_regex(
+                r'''\bhref\s*=\s*(["'])(?P<path>(?!\1).+?)/?\1''',
+                get_element_by_class('next', webpage) or '',
+                'continuation page', group='path', default=None)
+            if next_url is None or next_url in url:
+                break
+            url, webpage = urljoin(url, next_url), None
+            p_url = compat_urlparse.urlparse(url)
+            url = compat_urlparse.urlunparse(p_url._replace(path=p_url.path + '/'))
+
+ def _get_title(self, list_id, webpage, url):
+ return self._html_search_regex(
+ r'