Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2024-11-22 18:22:21 +00:00)
[bambuser] Add an extractor for channels (closes #1702)
parent 72a5b4f702
commit 165e3bb67a
3 changed files with 49 additions and 2 deletions
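
For orientation, a minimal usage sketch (not part of the commit): with a youtube-dl build that includes this change, channel URLs are routed to the new BambuserChannelIE automatically and resolve to a playlist of the channel's broadcasts. The channel URL is the one exercised by the new test; the sketch assumes the Bambuser site still answers as it did when this commit landed.

# Hypothetical usage sketch via the public youtube_dl API (not part of this commit).
import youtube_dl

ydl = youtube_dl.YoutubeDL({'quiet': True})
# The channel URL is handled by BambuserChannelIE, which returns a playlist result.
result = ydl.extract_info('http://bambuser.com/channel/pixelversity', download=False)
entries = list(result.get('entries') or [])
print(result['title'], len(entries))
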
test/test_playlists.py

@@ -20,6 +20,7 @@ from youtube_dl.extractor import (
     SoundcloudUserIE,
     LivestreamIE,
     NHLVideocenterIE,
+    BambuserChannelIE,
 )
 
 
@@ -85,5 +86,13 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['title'], u'Highlights')
         self.assertEqual(len(result['entries']), 12)
 
+    def test_bambuser_channel(self):
+        dl = FakeYDL()
+        ie = BambuserChannelIE(dl)
+        result = ie.extract('http://bambuser.com/channel/pixelversity')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['title'], u'pixelversity')
+        self.assertTrue(len(result['entries']) >= 66)
+
 if __name__ == '__main__':
     unittest.main()
youtube_dl/extractor/__init__.py

@@ -9,7 +9,7 @@ from .arte import (
     ArteTVFutureIE,
 )
 from .auengine import AUEngineIE
-from .bambuser import BambuserIE
+from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
youtube_dl/extractor/bambuser.py

@@ -1,10 +1,15 @@
 import re
 import json
+import itertools
 
 from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+)
 
 
 class BambuserIE(InfoExtractor):
     IE_NAME = u'bambuser'
     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
     _API_KEY = '005f64509e19a868399060af746a00aa'
@@ -33,10 +38,43 @@ class BambuserIE(InfoExtractor):
             'id': video_id,
             'title': info['title'],
             'url': info['url'],
-            'thumbnail': info['preview'],
+            'thumbnail': info.get('preview'),
             'duration': int(info['length']),
             'view_count': int(info['views_total']),
             'uploader': info['username'],
             'uploader_id': info['uid'],
         }
+
+
+class BambuserChannelIE(InfoExtractor):
+    IE_NAME = u'bambuser:channel'
+    _VALID_URL = r'http://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
+    # The maximum number of broadcasts we can get with each request
+    _STEP = 50
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        user = mobj.group('user')
+        urls = []
+        last_id = ''
+        for i in itertools.count(1):
+            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
+                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
+                '&method=broadcast&format=json&vid_older_than={last}'
+                ).format(user=user, count=self._STEP, last=last_id)
+            req = compat_urllib_request.Request(req_url)
+            # Without this Referer header the API returns no results
+            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
+            info_json = self._download_webpage(req, user,
+                u'Downloading page %d' % i)
+            results = json.loads(info_json)['result']
+            if len(results) == 0:
+                break
+            last_id = results[-1]['vid']
+            urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)
+
+        return {
+            '_type': 'playlist',
+            'title': user,
+            'entries': urls,
+        }
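
The core of BambuserChannelIE is cursor-based paging over Bambuser's xhr-api endpoint. Below is a minimal standalone sketch of that same scheme outside the extractor framework (not part of the commit): the endpoint, query parameters, and the Referer requirement are taken from the diff above, while the function name fetch_channel_pages is made up for illustration, and the service may no longer answer these requests.

# Standalone paging sketch (hypothetical helper, not part of this commit).
import json
import itertools

try:
    from urllib.request import Request, urlopen  # Python 3
except ImportError:
    from urllib2 import Request, urlopen  # Python 2

def fetch_channel_pages(user, step=50):
    """Yield (page_number, results) for a Bambuser channel, oldest-first cursoring."""
    last_id = ''
    for page in itertools.count(1):
        url = ('http://bambuser.com/xhr-api/index.php?username={user}'
               '&sort=created&access_mode=0%2C1%2C2&limit={count}'
               '&method=broadcast&format=json&vid_older_than={last}'
               ).format(user=user, count=step, last=last_id)
        req = Request(url)
        # The API returns nothing without a matching Referer header.
        req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
        results = json.loads(urlopen(req).read().decode('utf-8'))['result']
        if not results:
            break
        # Feed the last broadcast id back as the cursor for the next, older page.
        last_id = results[-1]['vid']
        yield page, results

In the extractor itself, each result's 'page' URL is wrapped with url_result(..., 'Bambuser') so that the single-video BambuserIE handles the actual extraction of every broadcast in the playlist.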