Mirror of https://github.com/ytdl-org/youtube-dl (synced 2025-10-24 09:08:36 +09:00)
Compare commits: 2020.12.12...df-test-cl (609 commits)
Commits in this range: ed1ad29633 … bcc8ef0a5a (609 total).

.github/ISSUE_TEMPLATE/1_broken_site.md (6 changes)
@@ -18,7 +18,7 @@ title: ''

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.12.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.

@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
-->

- [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.12.12**
+- [ ] I've verified that I'm running youtube-dl version **2021.12.17**
- [ ] I've checked that all provided URLs are alive and playable in a browser
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [ ] I've searched the bugtracker for similar issues including closed ones

@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2020.12.12
+[debug] youtube-dl version 2021.12.17
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}

.github/ISSUE_TEMPLATE/2_site_support_request.md

@@ -19,7 +19,7 @@ labels: 'site-support-request'

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.12.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
- Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.

@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
-->

- [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.12**
+- [ ] I've verified that I'm running youtube-dl version **2021.12.17**
- [ ] I've checked that all provided URLs are alive and playable in a browser
- [ ] I've checked that none of provided URLs violate any copyrights
- [ ] I've searched the bugtracker for similar site support requests including closed ones

.github/ISSUE_TEMPLATE/3_site_feature_request.md

@@ -18,13 +18,13 @@ title: ''

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.12.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->

- [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.12**
+- [ ] I've verified that I'm running youtube-dl version **2021.12.17**
- [ ] I've searched the bugtracker for similar site feature requests including closed ones

.github/ISSUE_TEMPLATE/4_bug_report.md (6 changes)
@@ -18,7 +18,7 @@ title: ''

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.12.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.

@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
-->

- [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.12.12**
+- [ ] I've verified that I'm running youtube-dl version **2021.12.17**
- [ ] I've checked that all provided URLs are alive and playable in a browser
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [ ] I've searched the bugtracker for similar bug reports including closed ones

@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2020.12.12
+[debug] youtube-dl version 2021.12.17
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}

.github/ISSUE_TEMPLATE/5_feature_request.md (4 changes)
@@ -19,13 +19,13 @@ labels: 'request'

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.12.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->

- [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.12**
+- [ ] I've verified that I'm running youtube-dl version **2021.12.17**
- [ ] I've searched the bugtracker for similar feature requests including closed ones

.github/ISSUE_TEMPLATE/config.yml (new file, 1 line)
@@ -0,0 +1 @@
blank_issues_enabled: false

.github/PULL_REQUEST_TEMPLATE.md (4 changes)
@@ -7,8 +7,10 @@
---

### Before submitting a *pull request* make sure you have:
-- [ ] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections
- [ ] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
+- [ ] Read [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site)
+- [ ] Read [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) and adjusted the code to meet them
+- [ ] Covered the code with tests (note that PRs without tests will be REJECTED)
- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)

### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:

.github/workflows/ci.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
name: CI
on: [push, pull_request]
jobs:
  tests:
    name: Tests
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: true
      matrix:
        os: [ubuntu-18.04]
        # TODO: python 2.6
        python-version: [2.7, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, pypy-2.7, pypy-3.6, pypy-3.7]
        python-impl: [cpython]
        ytdl-test-set: [core, download]
        run-tests-ext: [sh]
        include:
        # python 3.2 is only available on windows via setup-python
        - os: windows-latest
          python-version: 3.2
          python-impl: cpython
          ytdl-test-set: core
          run-tests-ext: bat
        - os: windows-latest
          python-version: 3.2
          python-impl: cpython
          ytdl-test-set: download
          run-tests-ext: bat
        # jython
        - os: ubuntu-18.04
          python-impl: jython
          ytdl-test-set: core
          run-tests-ext: sh
        - os: ubuntu-18.04
          python-impl: jython
          ytdl-test-set: download
          run-tests-ext: sh
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      if: ${{ matrix.python-impl == 'cpython' }}
      with:
        python-version: ${{ matrix.python-version }}
    - name: Set up Java 8
      if: ${{ matrix.python-impl == 'jython' }}
      uses: actions/setup-java@v1
      with:
        java-version: 8
    - name: Install Jython
      if: ${{ matrix.python-impl == 'jython' }}
      run: |
        wget https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
        java -jar jython-installer.jar -s -d "$HOME/jython"
        echo "$HOME/jython/bin" >> $GITHUB_PATH
    - name: Install nose
      if: ${{ matrix.python-impl != 'jython' }}
      run: pip install nose
    - name: Install nose (Jython)
      if: ${{ matrix.python-impl == 'jython' }}
      # Working around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
      run: |
        wget https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl
        pip install nose-1.3.7-py2-none-any.whl
    - name: Run tests
      continue-on-error: ${{ matrix.ytdl-test-set == 'download' || matrix.python-impl == 'jython' }}
      env:
        YTDL_TEST_SET: ${{ matrix.ytdl-test-set }}
      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }}
  flake8:
    name: Linter
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: 3.9
    - name: Install flake8
      run: pip install flake8
    - name: Run flake8
      run: flake8 .

.travis.yml (50 lines removed)
@@ -1,50 +0,0 @@
language: python
python:
  - "2.6"
  - "2.7"
  - "3.2"
  - "3.3"
  - "3.4"
  - "3.5"
  - "3.6"
  - "pypy"
  - "pypy3"
dist: trusty
env:
  - YTDL_TEST_SET=core
#  - YTDL_TEST_SET=download
jobs:
  include:
    - python: 3.7
      dist: xenial
      env: YTDL_TEST_SET=core
#    - python: 3.7
#      dist: xenial
#      env: YTDL_TEST_SET=download
    - python: 3.8
      dist: xenial
      env: YTDL_TEST_SET=core
#    - python: 3.8
#      dist: xenial
#      env: YTDL_TEST_SET=download
    - python: 3.8-dev
      dist: xenial
      env: YTDL_TEST_SET=core
#    - python: 3.8-dev
#      dist: xenial
#      env: YTDL_TEST_SET=download
    - env: JYTHON=true; YTDL_TEST_SET=core
#    - env: JYTHON=true; YTDL_TEST_SET=download
    - name: flake8
      python: 3.8
      dist: xenial
      install: pip install flake8
      script: flake8 .
  fast_finish: true
  allow_failures:
#    - env: YTDL_TEST_SET=download
    - env: JYTHON=true; YTDL_TEST_SET=core
#    - env: JYTHON=true; YTDL_TEST_SET=download
before_install:
  - if [ "$JYTHON" == "true" ]; then ./devscripts/install_jython.sh; export PATH="$HOME/jython/bin:$PATH"; fi
script: ./devscripts/run_tests.sh

AUTHORS (1 change)
@@ -246,3 +246,4 @@ Enes Solak
Nathan Rossi
Thomas van der Berg
Luca Cherubin
+Adrian Heine

CONTRIBUTING.md

@@ -150,7 +150,7 @@ After you have ensured this site is distributing its content legally, you can fo
            # TODO more properties (see youtube_dl/extractor/common.py)
        }
    ```
-5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
+5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py). This makes the extractor available for use, as long as the class ends with `IE`.
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
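
The changed step 5 and the `_TEST`/`_TESTS` instructions in step 6 above are easier to follow next to a concrete example. The snippet below is a minimal sketch, not part of the diff: the site `yourextractor.com`, the class `YourExtractorIE`, and all test values are hypothetical placeholders. The one-line import that step 5 asks for would then be `from .yourextractor import YourExtractorIE` in `youtube_dl/extractor/extractors.py`.

```python
# youtube_dl/extractor/yourextractor.py -- minimal sketch with a _TESTS list.
# Everything here (site, URLs, md5, title) is a placeholder, not real data.
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    # More than one test case: use _TESTS (a list of dicts) instead of _TEST,
    # as described in step 6 above.
    _TESTS = [{
        'url': 'https://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        # Only checked against _VALID_URL, never downloaded, and therefore
        # not counted as a separate download test.
        'url': 'https://yourextractor.com/embed/42',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(
            r'<h1[^>]*>(.+?)</h1>', webpage, 'title')
        return {
            'id': video_id,
            'title': title,
            'url': self._og_search_video_url(webpage),
            # TODO: more properties (see youtube_dl/extractor/common.py)
        }
```

Because the class name ends in `IE`, the import in `extractors.py` is all that is needed to make it available, and `python test/test_download.py TestDownload.test_YourExtractor` from step 6 will exercise the first test case.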

ChangeLog (585 changes)
@@ -1,3 +1,586 @@
|
|||||||
|
version 2021.12.17
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [postprocessor/ffmpeg] Show ffmpeg output on error (#22680, #29336)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube] Update signature function patterns (#30363, #30366)
|
||||||
|
* [peertube] Only call description endpoint if necessary (#29383)
|
||||||
|
* [periscope] Pass referer to HLS requests (#29419)
|
||||||
|
- [liveleak] Remove extractor (#17625, #24222, #29331)
|
||||||
|
+ [pornhub] Add support for pornhubthbh7ap3u.onion
|
||||||
|
* [pornhub] Detect geo restriction
|
||||||
|
* [pornhub] Dismiss tbr extracted from download URLs (#28927)
|
||||||
|
* [curiositystream:collection] Extend _VALID_URL (#26326, #29117)
|
||||||
|
* [youtube] Make get_video_info processing more robust (#29333)
|
||||||
|
* [youtube] Workaround for get_video_info request (#29333)
|
||||||
|
* [bilibili] Strip uploader name (#29202)
|
||||||
|
* [youtube] Update invidious instance list (#29281)
|
||||||
|
* [umg:de] Update GraphQL API URL (#29304)
|
||||||
|
* [nrk] Switch psapi URL to https (#29344)
|
||||||
|
+ [egghead] Add support for app.egghead.io (#28404, #29303)
|
||||||
|
* [appleconnect] Fix extraction (#29208)
|
||||||
|
+ [orf:tvthek] Add support for MPD formats (#28672, #29236)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.06.06
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [facebook] Improve login required detection
|
||||||
|
* [youporn] Fix formats and view count extraction (#29216)
|
||||||
|
* [orf:tvthek] Fix thumbnails extraction (#29217)
|
||||||
|
* [formula1] Fix extraction (#29206)
|
||||||
|
* [ard] Relax URL regular expression and fix video ids (#22724, #29091)
|
||||||
|
+ [ustream] Detect https embeds (#29133)
|
||||||
|
* [ted] Prefer own formats over external sources (#29142)
|
||||||
|
* [twitch:clips] Improve extraction (#29149)
|
||||||
|
+ [twitch:clips] Add access token query to download URLs (#29136)
|
||||||
|
* [youtube] Fix get_video_info request (#29086, #29165)
|
||||||
|
* [vimeo] Fix vimeo pro embed extraction (#29126)
|
||||||
|
* [redbulltv] Fix embed data extraction (#28770)
|
||||||
|
* [shahid] Relax URL regular expression (#28772, #28930)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.05.16
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [options] Fix thumbnail option group name (#29042)
|
||||||
|
* [YoutubeDL] Improve extract_info doc (#28946)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [playstuff] Add support for play.stuff.co.nz (#28901, #28931)
|
||||||
|
* [eroprofile] Fix extraction (#23200, #23626, #29008)
|
||||||
|
+ [vivo] Add support for vivo.st (#29009)
|
||||||
|
+ [generic] Add support for og:audio (#28311, #29015)
|
||||||
|
* [phoenix] Fix extraction (#29057)
|
||||||
|
+ [generic] Add support for sibnet embeds
|
||||||
|
+ [vk] Add support for sibnet embeds (#9500)
|
||||||
|
+ [generic] Add Referer header for direct videojs download URLs (#2879,
|
||||||
|
#20217, #29053)
|
||||||
|
* [orf:radio] Switch download URLs to HTTPS (#29012, #29046)
|
||||||
|
- [blinkx] Remove extractor (#28941)
|
||||||
|
* [medaltv] Relax URL regular expression (#28884)
|
||||||
|
+ [funimation] Add support for optional lang code in URLs (#28950)
|
||||||
|
+ [gdcvault] Add support for HTML5 videos
|
||||||
|
* [dispeak] Improve FLV extraction (#13513, #28970)
|
||||||
|
* [kaltura] Improve iframe extraction (#28969)
|
||||||
|
* [kaltura] Make embed code alternatives actually work
|
||||||
|
* [cda] Improve extraction (#28709, #28937)
|
||||||
|
* [twitter] Improve formats extraction from vmap URL (#28909)
|
||||||
|
* [xtube] Fix formats extraction (#28870)
|
||||||
|
* [svtplay] Improve extraction (#28507, #28876)
|
||||||
|
* [tv2dk] Fix extraction (#28888)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.04.26
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [xfileshare] Add support for wolfstream.tv (#28858)
|
||||||
|
* [francetvinfo] Improve video id extraction (#28792)
|
||||||
|
* [medaltv] Fix extraction (#28807)
|
||||||
|
* [tver] Redirect all downloads to Brightcove (#28849)
|
||||||
|
* [go] Improve video id extraction (#25207, #25216, #26058)
|
||||||
|
* [youtube] Fix lazy extractors (#28780)
|
||||||
|
+ [bbc] Extract description and timestamp from __INITIAL_DATA__ (#28774)
|
||||||
|
* [cbsnews] Fix extraction for python <3.6 (#23359)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.04.17
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [utils] Add support for experimental HTTP response status code
|
||||||
|
308 Permanent Redirect (#27877, #28768)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [lbry] Add support for HLS videos (#27877, #28768)
|
||||||
|
* [youtube] Fix stretched ratio calculation
|
||||||
|
* [youtube] Improve stretch extraction (#28769)
|
||||||
|
* [youtube:tab] Improve grid extraction (#28725)
|
||||||
|
+ [youtube:tab] Detect series playlist on playlists page (#28723)
|
||||||
|
+ [youtube] Add more invidious instances (#28706)
|
||||||
|
* [pluralsight] Extend anti-throttling timeout (#28712)
|
||||||
|
* [youtube] Improve URL to extractor routing (#27572, #28335, #28742)
|
||||||
|
+ [maoritv] Add support for maoritelevision.com (#24552)
|
||||||
|
+ [youtube:tab] Pass innertube context and x-goog-visitor-id header along with
|
||||||
|
continuation requests (#28702)
|
||||||
|
* [mtv] Fix Viacom A/B Testing Video Player extraction (#28703)
|
||||||
|
+ [pornhub] Extract DASH and HLS formats from get_media end point (#28698)
|
||||||
|
* [cbssports] Fix extraction (#28682)
|
||||||
|
* [jamendo] Fix track extraction (#28686)
|
||||||
|
* [curiositystream] Fix format extraction (#26845, #28668)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.04.07
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [extractor/common] Use compat_cookies_SimpleCookie for _get_cookies
|
||||||
|
+ [compat] Introduce compat_cookies_SimpleCookie
|
||||||
|
* [extractor/common] Improve JSON-LD author extraction
|
||||||
|
* [extractor/common] Fix _get_cookies on python 2 (#20673, #23256, #20326,
|
||||||
|
#28640)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube] Fix extraction of videos with restricted location (#28685)
|
||||||
|
+ [line] Add support for live.line.me (#17205, #28658)
|
||||||
|
* [vimeo] Improve extraction (#28591)
|
||||||
|
* [youku] Update ccode (#17852, #28447, #28460, #28648)
|
||||||
|
* [youtube] Prefer direct entry metadata over entry metadata from playlist
|
||||||
|
(#28619, #28636)
|
||||||
|
* [screencastomatic] Fix extraction (#11976, #24489)
|
||||||
|
+ [palcomp3] Add support for palcomp3.com (#13120)
|
||||||
|
+ [arnes] Add support for video.arnes.si (#28483)
|
||||||
|
+ [youtube:tab] Add support for hashtags (#28308)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.04.01
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube] Setup CONSENT cookie when needed (#28604)
|
||||||
|
* [vimeo] Fix password protected review extraction (#27591)
|
||||||
|
* [youtube] Improve age-restricted video extraction (#28578)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.03.31
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [vlive] Fix inkey request (#28589)
|
||||||
|
* [francetvinfo] Improve video id extraction (#28584)
|
||||||
|
+ [instagram] Extract duration (#28469)
|
||||||
|
* [instagram] Improve title extraction (#28469)
|
||||||
|
+ [sbs] Add support for ondemand watch URLs (#28566)
|
||||||
|
* [youtube] Fix video's channel extraction (#28562)
|
||||||
|
* [picarto] Fix live stream extraction (#28532)
|
||||||
|
* [vimeo] Fix unlisted video extraction (#28414)
|
||||||
|
* [youtube:tab] Fix playlist/community continuation items extraction (#28266)
|
||||||
|
* [ard] Improve clip id extraction (#22724, #28528)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.03.25
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [zoom] Add support for zoom.us (#16597, #27002, #28531)
|
||||||
|
* [bbc] Fix BBC IPlayer Episodes/Group extraction (#28360)
|
||||||
|
* [youtube] Fix default value for youtube_include_dash_manifest (#28523)
|
||||||
|
* [zingmp3] Fix extraction (#11589, #16409, #16968, #27205)
|
||||||
|
+ [vgtv] Add support for new tv.aftonbladet.se URL schema (#28514)
|
||||||
|
+ [tiktok] Detect private videos (#28453)
|
||||||
|
* [vimeo:album] Fix extraction for albums with number of videos multiple
|
||||||
|
to page size (#28486)
|
||||||
|
* [vvvvid] Fix kenc format extraction (#28473)
|
||||||
|
* [mlb] Fix video extraction (#21241)
|
||||||
|
* [svtplay] Improve extraction (#28448)
|
||||||
|
* [applepodcasts] Fix extraction (#28445)
|
||||||
|
* [rtve] Improve extraction
|
||||||
|
+ Extract all formats
|
||||||
|
* Fix RTVE Infantil extraction (#24851)
|
||||||
|
+ Extract is_live and series
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.03.14
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ Introduce release_timestamp meta field (#28386)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [southpark] Add support for southparkstudios.com (#28413)
|
||||||
|
* [southpark] Fix extraction (#26763, #28413)
|
||||||
|
* [sportdeutschland] Fix extraction (#21856, #28425)
|
||||||
|
* [pinterest] Reduce the number of HLS format requests
|
||||||
|
* [peertube] Improve thumbnail extraction (#28419)
|
||||||
|
* [tver] Improve title extraction (#28418)
|
||||||
|
* [fujitv] Fix HLS formats extension (#28416)
|
||||||
|
* [shahid] Fix format extraction (#28383)
|
||||||
|
+ [lbry] Add support for channel filters (#28385)
|
||||||
|
+ [bandcamp] Extract release timestamp
|
||||||
|
+ [lbry] Extract release timestamp (#28386)
|
||||||
|
* [pornhub] Detect flagged videos
|
||||||
|
+ [pornhub] Extract formats from get_media end point (#28395)
|
||||||
|
* [bilibili] Fix video info extraction (#28341)
|
||||||
|
+ [cbs] Add support for Paramount+ (#28342)
|
||||||
|
+ [trovo] Add Origin header to VOD formats (#28346)
|
||||||
|
* [voxmedia] Fix volume embed extraction (#28338)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.03.03
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube:tab] Switch continuation to browse API (#28289, #28327)
|
||||||
|
* [9c9media] Fix extraction for videos with multiple ContentPackages (#28309)
|
||||||
|
+ [bbc] Add support for BBC Reel videos (#21870, #23660, #28268)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.03.02
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [zdf] Rework extractors (#11606, #13473, #17354, #21185, #26711, #27068,
|
||||||
|
#27930, #28198, #28199, #28274)
|
||||||
|
* Generalize cross-extractor video ids for zdf based extractors
|
||||||
|
* Improve extraction
|
||||||
|
* Fix 3sat and phoenix
|
||||||
|
* [stretchinternet] Fix extraction (#28297)
|
||||||
|
* [urplay] Fix episode data extraction (#28292)
|
||||||
|
+ [bandaichannel] Add support for b-ch.com (#21404)
|
||||||
|
* [srgssr] Improve extraction (#14717, #14725, #27231, #28238)
|
||||||
|
+ Extract subtitle
|
||||||
|
* Fix extraction for new videos
|
||||||
|
* Update srf download domains
|
||||||
|
* [vvvvid] Reduce season request payload size
|
||||||
|
+ [vvvvid] Extract series sublists playlist title (#27601, #27618)
|
||||||
|
+ [dplay] Extract Ad-Free uplynk URLs (#28160)
|
||||||
|
+ [wat] Detect DRM protected videos (#27958)
|
||||||
|
* [tf1] Improve extraction (#27980, #28040)
|
||||||
|
* [tmz] Fix and improve extraction (#24603, #24687, 28211)
|
||||||
|
+ [gedidigital] Add support for Gedi group sites (#7347, #26946)
|
||||||
|
* [youtube] Fix get_video_info request
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.02.22
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [postprocessor/embedthumbnail] Recognize atomicparsley binary in lowercase
|
||||||
|
(#28112)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [apa] Fix and improve extraction (#27750)
|
||||||
|
+ [youporn] Extract duration (#28019)
|
||||||
|
+ [peertube] Add support for canard.tube (#28190)
|
||||||
|
* [youtube] Fixup m4a_dash formats (#28165)
|
||||||
|
+ [samplefocus] Add support for samplefocus.com (#27763)
|
||||||
|
+ [vimeo] Add support for unlisted video source format extraction
|
||||||
|
* [viki] Improve extraction (#26522, #28203)
|
||||||
|
* Extract uploader URL and episode number
|
||||||
|
* Report login required error
|
||||||
|
+ Extract 480p formats
|
||||||
|
* Fix API v4 calls
|
||||||
|
* [ninegag] Unescape title (#28201)
|
||||||
|
* [youtube] Improve URL regular expression (#28193)
|
||||||
|
+ [youtube] Add support for redirect.invidious.io (#28193)
|
||||||
|
+ [dplay] Add support for de.hgtv.com (#28182)
|
||||||
|
+ [dplay] Add support for discoveryplus.com (#24698)
|
||||||
|
+ [simplecast] Add support for simplecast.com (#24107)
|
||||||
|
* [youtube] Fix uploader extraction in flat playlist mode (#28045)
|
||||||
|
* [yandexmusic:playlist] Request missing tracks in chunks (#27355, #28184)
|
||||||
|
+ [storyfire] Add support for storyfire.com (#25628, #26349)
|
||||||
|
+ [zhihu] Add support for zhihu.com (#28177)
|
||||||
|
* [youtube] Fix controversial videos when authenticated with cookies (#28174)
|
||||||
|
* [ccma] Fix timestamp parsing in python 2
|
||||||
|
+ [videopress] Add support for video.wordpress.com
|
||||||
|
* [kakao] Improve info extraction and detect geo restriction (#26577)
|
||||||
|
* [xboxclips] Fix extraction (#27151)
|
||||||
|
* [ard] Improve formats extraction (#28155)
|
||||||
|
+ [canvas] Add support for dagelijksekost.een.be (#28119)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.02.10
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube:tab] Improve grid continuation extraction (#28130)
|
||||||
|
* [ign] Fix extraction (#24771)
|
||||||
|
+ [xhamster] Extract format filesize
|
||||||
|
+ [xhamster] Extract formats from xplayer settings (#28114)
|
||||||
|
+ [youtube] Add support phone/tablet JS player (#26424)
|
||||||
|
* [archiveorg] Fix and improve extraction (#21330, #23586, #25277, #26780,
|
||||||
|
#27109, #27236, #28063)
|
||||||
|
+ [cda] Detect geo restricted videos (#28106)
|
||||||
|
* [urplay] Fix extraction (#28073, #28074)
|
||||||
|
* [youtube] Fix release date extraction (#28094)
|
||||||
|
+ [youtube] Extract abr and vbr (#28100)
|
||||||
|
* [youtube] Skip OTF formats (#28070)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.02.04.1
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [youtube] Prefer DASH formats (#28070)
|
||||||
|
* [azmedien] Fix extraction (#28064)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.02.04
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [pornhub] Implement lazy playlist extraction
|
||||||
|
* [svtplay] Fix video id extraction (#28058)
|
||||||
|
+ [pornhub] Add support for authentication (#18797, #21416, #24294)
|
||||||
|
* [pornhub:user] Improve paging
|
||||||
|
+ [pornhub:user] Add support for URLs unavailable via /videos page (#27853)
|
||||||
|
+ [bravotv] Add support for oxygen.com (#13357, #22500)
|
||||||
|
+ [youtube] Pass embed URL to get_video_info request
|
||||||
|
* [ccma] Improve metadata extraction (#27994)
|
||||||
|
+ Extract age limit, alt title, categories, series and episode number
|
||||||
|
* Fix timestamp multiple subtitles extraction
|
||||||
|
* [egghead] Update API domain (#28038)
|
||||||
|
- [vidzi] Remove extractor (#12629)
|
||||||
|
* [vidio] Improve metadata extraction
|
||||||
|
* [youtube] Improve subtitles extraction
|
||||||
|
* [youtube] Fix chapter extraction fallback
|
||||||
|
* [youtube] Rewrite extractor
|
||||||
|
* Improve format sorting
|
||||||
|
* Remove unused code
|
||||||
|
* Fix series metadata extraction
|
||||||
|
* Fix trailer video extraction
|
||||||
|
* Improve error reporting
|
||||||
|
+ Extract video location
|
||||||
|
+ [vvvvid] Add support for youtube embeds (#27825)
|
||||||
|
* [googledrive] Report download page errors (#28005)
|
||||||
|
* [vlive] Fix error message decoding for python 2 (#28004)
|
||||||
|
* [youtube] Improve DASH formats file size extraction
|
||||||
|
* [cda] Improve birth validation detection (#14022, #27929)
|
||||||
|
+ [awaan] Extract uploader id (#27963)
|
||||||
|
+ [medialaan] Add support DPG Media MyChannels based websites (#14871, #15597,
|
||||||
|
#16106, #16489)
|
||||||
|
* [abcnews] Fix extraction (#12394, #27920)
|
||||||
|
* [AMP] Fix upload date and timestamp extraction (#27970)
|
||||||
|
* [tv4] Relax URL regular expression (#27964)
|
||||||
|
+ [tv2] Add support for mtvuutiset.fi (#27744)
|
||||||
|
* [adn] Improve login warning reporting
|
||||||
|
* [zype] Fix uplynk id extraction (#27956)
|
||||||
|
+ [adn] Add support for authentication (#17091, #27841, #27937)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.01.24.1
|
||||||
|
|
||||||
|
Core
|
||||||
|
* Introduce --output-na-placeholder (#27896)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [franceculture] Make thumbnail optional (#18807)
|
||||||
|
* [franceculture] Fix extraction (#27891, #27903)
|
||||||
|
* [njpwworld] Fix extraction (#27890)
|
||||||
|
* [comedycentral] Fix extraction (#27905)
|
||||||
|
* [wat] Fix format extraction (#27901)
|
||||||
|
+ [americastestkitchen:season] Add support for seasons (#27861)
|
||||||
|
+ [trovo] Add support for trovo.live (#26125)
|
||||||
|
+ [aol] Add support for yahoo videos (#26650)
|
||||||
|
* [yahoo] Fix single video extraction
|
||||||
|
* [lbry] Unescape lbry URI (#27872)
|
||||||
|
* [9gag] Fix and improve extraction (#23022)
|
||||||
|
* [americastestkitchen] Improve metadata extraction for ATK episodes (#27860)
|
||||||
|
* [aljazeera] Fix extraction (#20911, #27779)
|
||||||
|
+ [minds] Add support for minds.com (#17934)
|
||||||
|
* [ard] Fix title and description extraction (#27761)
|
||||||
|
+ [spotify] Add support for Spotify Podcasts (#27443)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.01.16
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [YoutubeDL] Protect from infinite recursion due to recursively nested
|
||||||
|
playlists (#27833)
|
||||||
|
* [YoutubeDL] Ignore failure to create existing directory (#27811)
|
||||||
|
* [YoutubeDL] Raise syntax error for format selection expressions with multiple
|
||||||
|
+ operators (#27803)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [animeondemand] Add support for lazy playlist extraction (#27829)
|
||||||
|
* [youporn] Restrict fallback download URL (#27822)
|
||||||
|
* [youporn] Improve height and tbr extraction (#20425, #23659)
|
||||||
|
* [youporn] Fix extraction (#27822)
|
||||||
|
+ [twitter] Add support for unified cards (#27826)
|
||||||
|
+ [twitch] Add Authorization header with OAuth token for GraphQL requests
|
||||||
|
(#27790)
|
||||||
|
* [mixcloud:playlist:base] Extract video id in flat playlist mode (#27787)
|
||||||
|
* [cspan] Improve info extraction (#27791)
|
||||||
|
* [adn] Improve info extraction
|
||||||
|
* [adn] Fix extraction (#26963, #27732)
|
||||||
|
* [youtube:search] Extract from all sections (#27604)
|
||||||
|
* [youtube:search] fix viewcount and try to extract all video sections (#27604)
|
||||||
|
* [twitch] Improve login error extraction
|
||||||
|
* [twitch] Fix authentication (#27743)
|
||||||
|
* [3qsdn] Improve extraction (#21058)
|
||||||
|
* [peertube] Extract formats from streamingPlaylists (#26002, #27586, #27728)
|
||||||
|
* [khanacademy] Fix extraction (#2887, #26803)
|
||||||
|
* [spike] Update Paramount Network feed URL (#27715)
|
||||||
|
|
||||||
|
|
||||||
|
version 2021.01.08
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [downloader/hls] Disable decryption in tests (#27660)
|
||||||
|
+ [utils] Add a function to clean podcast URLs
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [rai] Improve subtitles extraction (#27698, #27705)
|
||||||
|
* [canvas] Match only supported VRT NU URLs (#27707)
|
||||||
|
+ [bibeltv] Add support for bibeltv.de (#14361)
|
||||||
|
+ [bfmtv] Add support for bfmtv.com (#16053, #26615)
|
||||||
|
+ [sbs] Add support for ondemand play and news embed URLs (#17650, #27629)
|
||||||
|
* [twitch] Drop legacy kraken API v5 code altogether and refactor
|
||||||
|
* [twitch:vod] Switch to GraphQL for video metadata
|
||||||
|
* [canvas] Fix VRT NU extraction (#26957, #27053)
|
||||||
|
* [twitch] Switch access token to GraphQL and refactor (#27646)
|
||||||
|
+ [rai] Detect ContentItem in iframe (#12652, #27673)
|
||||||
|
* [ketnet] Fix extraction (#27662)
|
||||||
|
+ [dplay] Add suport Discovery+ domains (#27680)
|
||||||
|
* [motherless] Improve extraction (#26495, #27450)
* [motherless] Fix recent videos upload date extraction (#27661)
* [nrk] Fix extraction for videos without a legalAge rating
- [googleplus] Remove extractor (#4955, #7400)
+ [applepodcasts] Add support for podcasts.apple.com (#25918)
+ [googlepodcasts] Add support for podcasts.google.com
+ [iheart] Add support for iheart.com (#27037)
* [acast] Clean podcast URLs
* [stitcher] Clean podcast URLs
+ [xfileshare] Add support for aparat.cam (#27651)
+ [twitter] Add support for summary card (#25121)
* [twitter] Try to use a Generic fallback for unknown twitter cards (#25982)
+ [stitcher] Add support for shows and show metadata extraction (#20510)
* [stv] Improve episode id extraction (#23083)


version 2021.01.03

Extractors
* [nrk] Improve series metadata extraction (#27473)
+ [nrk] Extract subtitles
* [nrk] Fix age limit extraction
* [nrk] Improve video id extraction
+ [nrk] Add support for podcasts (#27634, #27635)
* [nrk] Generalize and delegate all item extractors to nrk
+ [nrk] Add support for mp3 formats
* [nrktv] Switch to playback endpoint
* [vvvvid] Fix season metadata extraction (#18130)
* [stitcher] Fix extraction (#20811, #27606)
* [acast] Fix extraction (#21444, #27612, #27613)
+ [arcpublishing] Add support for arcpublishing.com (#2298, #9340, #17200)
+ [sky] Add support for Sports News articles and Brightcove videos (#13054)
+ [vvvvid] Extract akamai formats
* [vvvvid] Skip unplayable episodes (#27599)
* [yandexvideo] Fix extraction for Python 3.4


version 2020.12.31

Core
* [utils] Accept only supported protocols in url_or_none
* [YoutubeDL] Allow format filtering using audio language (#16209)

Extractors
+ [redditr] Extract all thumbnails (#27503)
* [vvvvid] Improve info extraction
+ [vvvvid] Add support for playlists (#18130, #27574)
+ [yandexdisk] Extract info from webpage
* [yandexdisk] Fix extraction (#17861, #27131)
* [yandexvideo] Use old API call as fallback
* [yandexvideo] Fix extraction (#25000)
- [nbc] Remove CSNNE extractor
* [nbc] Fix NBCSport VPlayer URL extraction (#16640)
+ [aenetworks] Add support for biography.com (#3863)
* [uktvplay] Match new video URLs (#17909)
* [sevenplay] Detect API errors
* [tenplay] Fix format extraction (#26653)
* [brightcove] Raise error for DRM protected videos (#23467, #27568)


version 2020.12.29

Extractors
* [youtube] Improve yt initial data extraction (#27524)
* [youtube:tab] Improve URL matching (#27559)
* [youtube:tab] Restore retry on browse requests (#27313, #27564)
* [aparat] Fix extraction (#22285, #22611, #23348, #24354, #24591, #24904,
  #25418, #26070, #26350, #26738, #27563)
- [brightcove] Remove sonyliv specific code
* [piksel] Improve format extraction
+ [zype] Add support for uplynk videos
+ [toggle] Add support for live.mewatch.sg (#27555)
+ [go] Add support for fxnow.fxnetworks.com (#13972, #22467, #23754, #26826)
* [teachable] Improve embed detection (#26923)
* [mitele] Fix free video extraction (#24624, #25827, #26757)
* [telecinco] Fix extraction
* [youtube] Update invidious.snopyta.org (#22667)
* [amcnetworks] Improve auth only video detection (#27548)
+ [generic] Add support for VHX Embeds (#27546)


version 2020.12.26

Extractors
* [instagram] Fix comment count extraction
+ [instagram] Add support for reel URLs (#26234, #26250)
* [bbc] Switch to media selector v6 (#23232, #23933, #26303, #26432, #26821,
  #27538)
* [instagram] Improve thumbnail extraction
* [instagram] Fix extraction when authenticated (#22880, #26377, #26981,
  #27422)
* [spankbang:playlist] Fix extraction (#24087)
+ [spankbang] Add support for playlist videos
* [pornhub] Improve like and dislike count extraction (#27356)
* [pornhub] Fix lq formats extraction (#27386, #27393)
+ [bongacams] Add support for bongacams.com (#27440)
* [youtube:tab] Extend URL regular expression (#27501)
* [theweatherchannel] Fix extraction (#25930, #26051)
+ [sprout] Add support for Universal Kids (#22518)
* [theplatform] Allow passing geo bypass countries from other extractors
+ [wistia] Add support for playlists (#27533)
+ [ctv] Add support for ctv.ca (#27525)
* [9c9media] Improve info extraction
* [youtube] Fix automatic captions extraction (#27162, #27388)
* [sonyliv] Fix title for movies
* [sonyliv] Fix extraction (#25667)
* [streetvoice] Fix extraction (#27455, #27492)
+ [facebook] Add support for watchparty pages (#27507)
* [cbslocal] Fix video extraction
+ [brightcove] Add another method to extract policyKey
* [mewatch] Relax URL regular expression (#27506)


version 2020.12.22

Core
* [common] Remove unwanted query params from unsigned akamai manifest URLs

Extractors
- [tastytrade] Remove extractor (#25716)
* [niconico] Fix playlist extraction (#27428)
- [everyonesmixtape] Remove extractor
- [kanalplay] Remove extractor
* [arkena] Fix extraction
* [nba] Rewrite extractor
* [turner] Improve info extraction
* [youtube] Improve xsrf token extraction (#27442)
* [generic] Improve RSS age limit extraction
* [generic] Fix RSS itunes thumbnail extraction (#27405)
+ [redditr] Extract duration (#27426)
- [zaq1] Remove extractor
+ [asiancrush] Add support for retrocrush.tv
* [asiancrush] Fix extraction
- [noco] Remove extractor (#10864)
* [nfl] Fix extraction (#22245)
* [skysports] Relax URL regular expression (#27435)
+ [tv5unis] Add support for tv5unis.ca (#22399, #24890)
+ [videomore] Add support for more.tv (#27088)
+ [yandexmusic] Add support for music.yandex.com (#27425)
+ [nhk:program] Add support for audio programs and program clips
+ [nhk] Add support for NHK video programs (#27230)


version 2020.12.14

Core
* [extractor/common] Improve JSON-LD interaction statistic extraction (#23306)
* [downloader/hls] Delegate manifests with media initialization to ffmpeg
+ [extractor/common] Document duration meta field for playlists

Extractors
* [mdr] Bypass geo restriction
* [mdr] Improve extraction (#24346, #26873)
* [yandexmusic:album] Improve album title extraction (#27418)
* [eporner] Fix view count extraction and make optional (#23306)
+ [eporner] Extend URL regular expression
* [eporner] Fix hash extraction and extend _VALID_URL (#27396)
* [slideslive] Use m3u8 entry protocol for m3u8 formats (#27400)
* [twitcasting] Fix format extraction and improve info extraction (#24868)
* [linuxacademy] Fix authentication and extraction (#21129, #26223, #27402)
* [itv] Clean description from HTML tags (#27399)
* [vlive] Sort live formats (#27404)
* [hotstar] Fix and improve extraction
    * Fix format extraction (#26690)
    + Extract thumbnail URL (#16079, #20412)
    + Add support for country specific playlist URLs (#23496)
    * Select the last id in video URL (#26412)
+ [youtube] Add some invidious instances (#27373)


version 2020.12.12

Core

@@ -106,7 +689,7 @@ version 2020.12.02

Extractors
+ [tva] Add support for qub.ca (#27235)
+ [toggle] Detect DRM protected videos (#16479, #20805)
+ [toggle] Add support for new MeWatch URLs (#27256)
* [youtube:tab] Extract channels only from channels tab (#27266)
+ [cspan] Extract info from jwplayer data (#3672, #3734, #10638, #13030,
README.md
@@ -1,4 +1,5 @@
[![Build Status](https://github.com/ytdl-org/youtube-dl/workflows/CI/badge.svg)](https://github.com/ytdl-org/youtube-dl/actions?query=workflow%3ACI)

youtube-dl - download videos from youtube.com or other video platforms

@@ -51,394 +52,431 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo

    youtube-dl [OPTIONS] URL [URL...]

# OPTIONS
    -h, --help                       Print this help text and exit
    --version                        Print program version and exit
    -U, --update                     Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)
    -i, --ignore-errors              Continue on download errors, for example to skip unavailable videos in a playlist
    --abort-on-error                 Abort downloading of further videos (in the playlist or the command line) if an error occurs
    --dump-user-agent                Display the current browser identification
    --list-extractors                List all supported extractors
    --extractor-descriptions         Output descriptions of all supported extractors
    --force-generic-extractor        Force extraction to use the generic extractor
    --default-search PREFIX          Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.
    --ignore-config                  Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: Do not read the user configuration in ~/.config/youtube-dl/config (%APPDATA%/youtube-dl/config.txt on Windows)
    --config-location PATH           Location of the configuration file; either the path to the config or its containing directory.
    --flat-playlist                  Do not extract the videos of a playlist, only list them.
    --mark-watched                   Mark videos watched (YouTube only)
    --no-mark-watched                Do not mark videos watched (YouTube only)
    --no-color                       Do not emit color codes in output

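Aside from the command line, these switches can also be set from Python through the embedded `YoutubeDL` API documented later in this README. A minimal sketch — the URL is only an example, and the `ignoreerrors`/`quiet` keys are my reading of the embedded equivalents of `-i` and `-q`, not something this section guarantees:

```python
from __future__ import unicode_literals
import youtube_dl

# Rough library-side equivalents of `-i` (continue on errors) and `-q` (quiet).
ydl_opts = {
    'ignoreerrors': True,  # like -i / --ignore-errors
    'quiet': True,         # like -q / --quiet
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    # Example URL only; replace it with whatever you actually want to download.
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```
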
## Network Options:
    --proxy URL                      Use the specified HTTP/HTTPS/SOCKS proxy. To enable SOCKS proxy, specify a proper scheme. For example socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") for direct connection
    --socket-timeout SECONDS         Time to wait before giving up, in seconds
    --source-address IP              Client-side IP address to bind to
    -4, --force-ipv4                 Make all connections via IPv4
    -6, --force-ipv6                 Make all connections via IPv6

## Geo Restriction:
    --geo-verification-proxy URL     Use this proxy to verify the IP address for some geo-restricted sites. The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.
    --geo-bypass                     Bypass geographic restriction via faking X-Forwarded-For HTTP header
    --no-geo-bypass                  Do not bypass geographic restriction via faking X-Forwarded-For HTTP header
    --geo-bypass-country CODE        Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code
    --geo-bypass-ip-block IP_BLOCK   Force bypass geographic restriction with explicitly provided IP block in CIDR notation

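When youtube-dl is embedded rather than run from a shell, the network and geo options above appear to map onto `YoutubeDL` parameters such as `proxy`, `socket_timeout` and `geo_bypass_country`; treat the names in this sketch as assumptions, and the proxy address as a placeholder:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # --proxy URL: placeholder SOCKS5 proxy; replace it or drop the key entirely
    'proxy': 'socks5://127.0.0.1:1080/',
    # --socket-timeout SECONDS
    'socket_timeout': 15,
    # --geo-bypass-country CODE: fake an X-Forwarded-For header for this country
    'geo_bypass_country': 'DE',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
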
## Video Selection:
    --playlist-start NUMBER          Playlist video to start at (default is 1)
    --playlist-end NUMBER            Playlist video to end at (default is last)
    --playlist-items ITEM_SPEC       Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
    --match-title REGEX              Download only matching titles (regex or caseless sub-string)
    --reject-title REGEX             Skip download for matching titles (regex or caseless sub-string)
    --max-downloads NUMBER           Abort after downloading NUMBER files
    --min-filesize SIZE              Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
    --max-filesize SIZE              Do not download any videos larger than SIZE (e.g. 50k or 44.6m)
    --date DATE                      Download only videos uploaded in this date
    --datebefore DATE                Download only videos uploaded on or before this date (i.e. inclusive)
    --dateafter DATE                 Download only videos uploaded on or after this date (i.e. inclusive)
    --min-views COUNT                Do not download any videos with less than COUNT views
    --max-views COUNT                Do not download any videos with more than COUNT views
    --match-filter FILTER            Generic video filter. Specify any key (see the "OUTPUT TEMPLATE" for a list of available keys) to match if the key is present, !key to check if the key is not present, key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against a number, key = 'LITERAL' (like "uploader = 'Mike Smith'", also works with !=) to match against a string literal and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) after the operator. For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or the dislike functionality is not available at the given service), but who also have a description, use --match-filter "like_count > 100 & dislike_count <? 50 & description" .
    --no-playlist                    Download only the video, if the URL refers to a video and a playlist.
    --yes-playlist                   Download the playlist, if the URL refers to a video and a playlist.
    --age-limit YEARS                Download only videos suitable for the given age
    --download-archive FILE          Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.
    --include-ads                    Download advertisements as well (experimental)

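The filtering options above also have library-side counterparts: `youtube_dl.utils.match_filter_func` compiles the same `--match-filter` syntax into a callable, and `youtube_dl.utils.DateRange` backs the date options. A hedged sketch — the playlist URL is a placeholder and the parameter names are assumptions:

```python
from __future__ import unicode_literals
import youtube_dl
from youtube_dl.utils import DateRange, match_filter_func

ydl_opts = {
    # --match-filter "like_count > 100 & dislike_count <? 50 & description"
    'match_filter': match_filter_func('like_count > 100 & dislike_count <? 50 & description'),
    # --dateafter 20201201 --datebefore 20201231 (both ends inclusive)
    'daterange': DateRange('20201201', '20201231'),
    # --playlist-items 1-3,7
    'playlist_items': '1-3,7',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/playlist?list=PLxxxxxxxx'])  # placeholder playlist
```
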
## Download Options:
    -r, --limit-rate RATE            Maximum download rate in bytes per second (e.g. 50K or 4.2M)
    -R, --retries RETRIES            Number of retries (default is 10), or "infinite".
    --fragment-retries RETRIES       Number of retries for a fragment (default is 10), or "infinite" (DASH, hlsnative and ISM)
    --skip-unavailable-fragments     Skip unavailable fragments (DASH, hlsnative and ISM)
    --abort-on-unavailable-fragment  Abort downloading when some fragment is not available
    --keep-fragments                 Keep downloaded fragments on disk after downloading is finished; fragments are erased by default
    --buffer-size SIZE               Size of download buffer (e.g. 1024 or 16K) (default is 1024)
    --no-resize-buffer               Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.
    --http-chunk-size SIZE           Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)
    --playlist-reverse               Download playlist videos in reverse order
    --playlist-random                Download playlist videos in random order
    --xattr-set-filesize             Set file xattribute ytdl.filesize with expected file size
    --hls-prefer-native              Use the native HLS downloader instead of ffmpeg
    --hls-prefer-ffmpeg              Use ffmpeg instead of the native HLS downloader
    --hls-use-mpegts                 Use the mpegts container for HLS videos, allowing to play the video while downloading (some players may not be able to play it)
    --external-downloader COMMAND    Use the specified external downloader. Currently supports aria2c,avconv,axel,curl,ffmpeg,httpie,wget
    --external-downloader-args ARGS  Give these arguments to the external downloader

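A rough embedded-API sketch of a few of the options above; the parameter names (`ratelimit`, `retries`, `fragment_retries`, `hls_prefer_native`) are my reading of how these flags are exposed to Python callers, so treat them as assumptions:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # -r 500K: on the Python side the rate limit is given in bytes per second
    'ratelimit': 500 * 1024,
    # -R 10 / --fragment-retries 10
    'retries': 10,
    'fragment_retries': 10,
    # --hls-prefer-native
    'hls_prefer_native': True,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
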
## Filesystem Options:
    -a, --batch-file FILE            File containing URLs to download ('-' for stdin), one URL per line. Lines starting with '#', ';' or ']' are considered as comments and ignored.
    --id                             Use only video ID in file name
    -o, --output TEMPLATE            Output filename template, see the "OUTPUT TEMPLATE" for all the info
    --output-na-placeholder PLACEHOLDER  Placeholder value for unavailable meta fields in output filename template (default is "NA")
    --autonumber-start NUMBER        Specify the start value for %(autonumber)s (default is 1)
    --restrict-filenames             Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames
    -w, --no-overwrites              Do not overwrite files
    -c, --continue                   Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.
    --no-continue                    Do not resume partially downloaded files (restart from beginning)
    --no-part                        Do not use .part files - write directly into output file
    --no-mtime                       Do not use the Last-modified header to set the file modification time
    --write-description              Write video description to a .description file
    --write-info-json                Write video metadata to a .info.json file
    --write-annotations              Write video annotations to a .annotations.xml file
    --load-info-json FILE            JSON file containing the video information (created with the "--write-info-json" option)
    --cookies FILE                   File to read cookies from and dump cookie jar in
    --cache-dir DIR                  Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.
    --no-cache-dir                   Disable filesystem caching
    --rm-cache-dir                   Delete all filesystem cache files

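For completeness, a sketch of how the filename template, download archive and info-json options above look when youtube-dl is used as a library; the parameter names are my understanding of the embedded API rather than something this section states:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # -o: the template syntax is the same as for the CLI option
    'outtmpl': '%(uploader)s/%(title)s-%(id)s.%(ext)s',
    # --download-archive archive.txt: skip anything already recorded in this file
    'download_archive': 'archive.txt',
    # --write-info-json and -w
    'writeinfojson': True,
    'nooverwrites': True,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
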
## Thumbnail Options:
    --write-thumbnail                Write thumbnail image to disk
    --write-all-thumbnails           Write all thumbnail image formats to disk
    --list-thumbnails                Simulate and list all available thumbnail formats

## Verbosity / Simulation Options:
    -q, --quiet                      Activate quiet mode
    --no-warnings                    Ignore warnings
    -s, --simulate                   Do not download the video and do not write anything to disk
    --skip-download                  Do not download the video
    -g, --get-url                    Simulate, quiet but print URL
    -e, --get-title                  Simulate, quiet but print title
    --get-id                         Simulate, quiet but print id
    --get-thumbnail                  Simulate, quiet but print thumbnail URL
    --get-description                Simulate, quiet but print video description
    --get-duration                   Simulate, quiet but print video length
    --get-filename                   Simulate, quiet but print output filename
    --get-format                     Simulate, quiet but print output format
    -j, --dump-json                  Simulate, quiet but print JSON information. See the "OUTPUT TEMPLATE" for a description of available keys.
    -J, --dump-single-json           Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.
    --print-json                     Be quiet and print the video information as JSON (video is still being downloaded).
    --newline                        Output progress bar as new lines
    --no-progress                    Do not print progress bar
    --console-title                  Display progress in console titlebar
    -v, --verbose                    Print various debugging information
    --dump-pages                     Print downloaded pages encoded using base64 to debug problems (very verbose)
    --write-pages                    Write downloaded intermediary pages to files in the current directory to debug problems
    --print-traffic                  Display sent and read HTTP traffic
    -C, --call-home                  Contact the youtube-dl server for debugging
    --no-call-home                   Do NOT contact the youtube-dl server for debugging

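Something close to `-j`/`--skip-download` can be had from Python by asking for metadata only; a small sketch, assuming the example URL resolves:

```python
from __future__ import unicode_literals
import json
import youtube_dl

# Roughly what `-j` / `--dump-json` does: fetch metadata only, print a JSON excerpt.
ydl_opts = {
    'quiet': True,
    'skip_download': True,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(json.dumps({k: info.get(k) for k in ('id', 'title', 'duration', 'uploader')}, indent=2))
```
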
## Workarounds:
    --encoding ENCODING              Force the specified encoding (experimental)
    --no-check-certificate           Suppress HTTPS certificate validation
    --prefer-insecure                Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)
    --user-agent UA                  Specify a custom user agent
    --referer URL                    Specify a custom referer, use if the video access is restricted to one domain
    --add-header FIELD:VALUE         Specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times
    --bidi-workaround                Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH
    --sleep-interval SECONDS         Number of seconds to sleep before each download when used alone or a lower bound of a range for randomized sleep before each download (minimum possible number of seconds to sleep) when used along with --max-sleep-interval.
    --max-sleep-interval SECONDS     Upper bound of a range for randomized sleep before each download (maximum possible number of seconds to sleep). Must only be used along with --min-sleep-interval.

## Video Format Options:
    -f, --format FORMAT              Video format code, see the "FORMAT SELECTION" for all the info
    --all-formats                    Download all available video formats
    --prefer-free-formats            Prefer free video formats unless a specific one is requested
    -F, --list-formats               List all available formats of requested videos
    --youtube-skip-dash-manifest     Do not download the DASH manifests and related data on YouTube videos
    --merge-output-format FORMAT     If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no merge is required

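Format selection uses the same `FORMAT` string whether it is passed as `-f` on the command line or as the `format` parameter of the embedded API; a brief sketch (the URL is an example):

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # -f "bestvideo[height<=720]+bestaudio/best" --merge-output-format mkv
    'format': 'bestvideo[height<=720]+bestaudio/best',
    'merge_output_format': 'mkv',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```
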
## Subtitle Options:
    --write-sub                      Write subtitle file
    --write-auto-sub                 Write automatically generated subtitle file (YouTube only)
    --all-subs                       Download all the available subtitles of the video
    --list-subs                      List all available subtitles for the video
    --sub-format FORMAT              Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"
    --sub-lang LANGS                 Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags

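A short sketch of the subtitle options above as embedded-API parameters; the key names (`writesubtitles`, `writeautomaticsub`, `subtitleslangs`, `subtitlesformat`) are assumed, not guaranteed by this section:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # --write-sub --write-auto-sub --sub-lang en,de --sub-format "srt/best"
    'writesubtitles': True,
    'writeautomaticsub': True,
    'subtitleslangs': ['en', 'de'],
    'subtitlesformat': 'srt/best',
    'skip_download': True,  # only fetch the subtitle files in this example
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
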
## Authentication Options:
    -u, --username USERNAME          Login with this account ID
    -p, --password PASSWORD          Account password. If this option is left out, youtube-dl will ask interactively.
    -2, --twofactor TWOFACTOR        Two-factor authentication code
    -n, --netrc                      Use .netrc authentication data
    --video-password PASSWORD        Video password (vimeo, youku)

## Adobe Pass Options:
    --ap-mso MSO                     Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs
    --ap-username USERNAME           Multiple-system operator account login
    --ap-password PASSWORD           Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.
    --ap-list-mso                    List all supported multiple-system operators

## Post-processing Options:
    -x, --extract-audio              Convert video files to audio-only files (requires ffmpeg/avconv and ffprobe/avprobe)
    --audio-format FORMAT            Specify audio format: "best", "aac", "flac", "mp3", "m4a", "opus", "vorbis", or "wav"; "best" by default; No effect without -x
    --audio-quality QUALITY          Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)
    --recode-video FORMAT            Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)
    --postprocessor-args ARGS        Give these arguments to the postprocessor
    -k, --keep-video                 Keep the video file on disk after the post-processing; the video is erased by default
    --no-post-overwrites             Do not overwrite post-processed files; the post-processed files are overwritten by default
    --embed-subs                     Embed subtitles in the video (only for mp4, webm and mkv videos)
    --embed-thumbnail                Embed thumbnail in the audio as cover art
    --add-metadata                   Write metadata to the video file
    --metadata-from-title FORMAT     Parse additional metadata like song title / artist from the video title. The format syntax is the same as --output. Regular expression with named capture groups may also be used. The parsed parameters replace existing values. Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like "Coldplay - Paradise". Example (regex): --metadata-from-title "(?P<artist>.+?) - (?P<title>.+)"
    --xattrs                         Write metadata to the video file's xattrs (using dublin core and xdg standards)
    --fixup POLICY                   Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn (the default; fix file if we can, warn otherwise)
    --prefer-avconv                  Prefer avconv over ffmpeg for running the postprocessors
    --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the postprocessors (default)
    --ffmpeg-location PATH           Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.
    --exec CMD                       Execute a command on the file after downloading and post-processing, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm {}'
    --convert-subs FORMAT            Convert the subtitles to other format (currently supported: srt|ass|vtt|lrc)

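The audio-extraction options above correspond to the ffmpeg post-processor; the snippet below follows the embedding example used elsewhere in this README and, as noted for `-x`, requires ffmpeg or avconv to be installed:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # -x --audio-format mp3 --audio-quality 192K
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192',
    }],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
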
# CONFIGURATION

@@ -582,7 +620,7 @@ Available for the media that is a track or a part of a music album:
- `disc_number` (numeric): Number of the disc or other physical medium the track belongs to
- `release_year` (numeric): Year (YYYY) when the album was released

Each aforementioned sequence when referenced in an output template will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with placeholder value provided with `--output-na-placeholder` (`NA` by default).

For example for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `youtube-dl test video` and id `BaW_jenozKcj`, this will result in a `youtube-dl test video-BaW_jenozKcj.mp4` file created in the current directory.

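As a concrete illustration of the paragraph above, the sketch below uses a template with fields that are not always available; the Python-side name of the placeholder setting is not asserted here, only the CLI flag `--output-na-placeholder` mentioned above:

```python
from __future__ import unicode_literals
import youtube_dl

# %(playlist)s and %(playlist_index)s are only filled in when a playlist is being
# downloaded; for a single video the CLI would substitute the --output-na-placeholder
# value ("NA" by default) instead.
ydl_opts = {
    'outtmpl': '%(playlist)s/%(playlist_index)s - %(title)s-%(id)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
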
@@ -677,6 +715,7 @@ Also filtering work for comparisons `=` (equals), `^=` (starts with), `$=` (ends
- `container`: Name of the container format
- `protocol`: The protocol that will be used for the actual download, lower-case (`http`, `https`, `rtsp`, `rtmp`, `rtmpe`, `mms`, `f4m`, `ism`, `http_dash_segments`, `m3u8`, or `m3u8_native`)
- `format_id`: A short description of the format
- `language`: Language code

Any string comparison may be prefixed with negation `!` in order to produce an opposite comparison, e.g. `!*=` (does not contain).

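Since `language` is one of the filterable fields listed above, and format filtering by audio language was only added recently according to the changelog, a cautious format string keeps a fallback after the slash; a sketch under that assumption:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # Prefer a format whose audio language is English; many extractors do not
    # populate `language` yet, hence the plain "best" fallback after the slash.
    'format': 'best[language=en]/best',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
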
@@ -854,7 +893,7 @@ Since June 2012 ([#342](https://github.com/ytdl-org/youtube-dl/issues/342)) yout

### The exe throws an error due to missing `MSVCR100.dll`

To run the exe you need to install first the [Microsoft Visual C++ 2010 Service Pack 1 Redistributable Package (x86)](https://download.microsoft.com/download/1/6/5/165255E7-1014-4D0A-B094-B6A430A6BFFC/vcredist_x86.exe).

### On Windows, how should I set up ffmpeg and youtube-dl? Where should I put the exe files?

@@ -879,7 +918,7 @@ Either prepend `https://www.youtube.com/watch?v=` or separate the ID from the op

Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`.

In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [Get cookies.txt](https://chrome.google.com/webstore/detail/get-cookiestxt/bgaddhkoddajcdgocldbbfleckgcbcid/) (for Chrome) or [cookies.txt](https://addons.mozilla.org/en-US/firefox/addon/cookies-txt/) (for Firefox).

Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows and `LF` (`\n`) for Unix and Unix-like systems (Linux, macOS, etc.). `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.

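From the embedded API the same cookies file appears to be passed via the `cookiefile` parameter; a sketch reusing the placeholder path from above:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # --cookies /path/to/cookies/file.txt: the file must be in Mozilla/Netscape
    # format, as described above, and the cookie jar is dumped back to it.
    'cookiefile': '/path/to/cookies/file.txt',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])  # example URL
```
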
@@ -1030,9 +1069,11 @@ After you have ensured this site is distributing its content legally, you can fo
        }
    ```
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test (actually, test case) then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note:
    * the test names use the extractor class name **without the trailing `IE`**
    * tests with `only_matching` key in test's dict are not counted.
8. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
9. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):

        $ flake8 youtube_dl/extractor/yourextractor.py

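To make step 6 concrete, here is a hedged sketch of a `_TESTS` list inside a hypothetical extractor; the class name, URLs and ids are placeholders in the style of the extractor template, not a real site:

```python
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):  # hypothetical name from the template
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    # A list of test cases instead of a single _TEST dict; they run as
    # TestDownload.test_YourExtractor, TestDownload.test_YourExtractor_1, ...
    _TESTS = [{
        'url': 'https://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        # only_matching tests just exercise _VALID_URL and are not counted
        # as download test cases
        'url': 'https://yourextractor.com/embed/42',
        'only_matching': True,
    }]
```
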
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-wget http://central.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar
-java -jar jython-installer-2.7.1.jar -s -d "$HOME/jython"
-$HOME/jython/bin/jython -m pip install nose
devscripts/run_tests.bat
@@ -0,0 +1,17 @@
@echo off

rem Keep this list in sync with the `offlinetest` target in Makefile
set DOWNLOAD_TESTS="age_restriction^|download^|iqiyi_sdk_interpreter^|socks^|subtitles^|write_annotations^|youtube_lists^|youtube_signature"

if "%YTDL_TEST_SET%" == "core" (
    set test_set="-I test_("%DOWNLOAD_TESTS%")\.py"
    set multiprocess_args=""
) else if "%YTDL_TEST_SET%" == "download" (
    set test_set="-I test_(?!"%DOWNLOAD_TESTS%").+\.py"
    set multiprocess_args="--processes=4 --process-timeout=540"
) else (
    echo YTDL_TEST_SET is not set or invalid
    exit /b 1
)

nosetests test --verbose %test_set:"=% %multiprocess_args:"=%
@@ -1,9 +1,9 @@
 # Supported sites
  - **1tv**: Первый канал
- - **1up.com**
  - **20min**
  - **220.ro**
  - **23video**
+ - **247sports**
  - **24video**
  - **3qsdn**: 3Q SDN
  - **3sat**
@@ -46,17 +46,20 @@
  - **Amara**
  - **AMCNetworks**
  - **AmericasTestKitchen**
+ - **AmericasTestKitchenSeason**
  - **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
  - **AnimeOnDemand**
  - **Anvato**
- - **aol.com**
+ - **aol.com**: Yahoo screen and movies
  - **APA**
  - **Aparat**
  - **AppleConnect**
  - **AppleDaily**: 臺灣蘋果日報
+ - **ApplePodcasts**
  - **appletrailers**
  - **appletrailers:section**
  - **archive.org**: archive.org videos
+ - **ArcPublishing**
  - **ARD**
  - **ARD:mediathek**
  - **ARDBetaMediathek**
@@ -80,6 +83,7 @@
  - **awaan:video**
  - **AZMedien**: AZ Medien videos
  - **BaiduVideo**: 百度视频
+ - **bandaichannel**
  - **Bandcamp**
  - **Bandcamp:album**
  - **Bandcamp:weekly**
@@ -87,7 +91,8 @@
  - **bbc**: BBC
  - **bbc.co.uk**: BBC iPlayer
  - **bbc.co.uk:article**: BBC articles
- - **bbc.co.uk:iplayer:playlist**
+ - **bbc.co.uk:iplayer:episodes**
+ - **bbc.co.uk:iplayer:group**
  - **bbc.co.uk:playlist**
  - **BBVTV**
  - **Beatport**
@@ -97,6 +102,10 @@
  - **BellMedia**
  - **Bet**
  - **bfi:player**
+ - **bfmtv**
+ - **bfmtv:article**
+ - **bfmtv:live**
+ - **BibelTV**
  - **Bigflix**
  - **Bild**: Bild.de
  - **BiliBili**
@@ -104,14 +113,15 @@
  - **BilibiliAudioAlbum**
  - **BiliBiliPlayer**
  - **BioBioChileTV**
+ - **Biography**
  - **BIQLE**
  - **BitChute**
  - **BitChuteChannel**
  - **BleacherReport**
  - **BleacherReportCMS**
- - **blinkx**
  - **Bloomberg**
  - **BokeCC**
+ - **BongaCams**
  - **BostonGlobe**
  - **Box**
  - **Bpb**: Bundeszentrale für politische Bildung
@@ -146,10 +156,12 @@
  - **CBS**
  - **CBSInteractive**
  - **CBSLocal**
+ - **CBSLocalArticle**
  - **cbsnews**: CBS News
  - **cbsnews:embed**
  - **cbsnews:livevideo**: CBS News Live Videos
- - **CBSSports**
+ - **cbssports**
+ - **cbssports:embed**
  - **CCMA**
  - **CCTV**: 央视网
  - **CDA**
@@ -183,8 +195,6 @@
  - **CNNArticle**
  - **CNNBlogs**
  - **ComedyCentral**
- - **ComedyCentralFullEpisodes**
- - **ComedyCentralShortname**
  - **ComedyCentralTV**
  - **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
  - **CONtv**
@@ -195,9 +205,9 @@
  - **CrooksAndLiars**
  - **crunchyroll**
  - **crunchyroll:playlist**
- - **CSNNE**
  - **CSpan**: C-SPAN
  - **CtsNews**: 華視新聞
+ - **CTV**
  - **CTVNews**
  - **cu.ntv.co.jp**: Nippon Television Network
  - **Culturebox**
@@ -205,6 +215,7 @@
  - **curiositystream**
  - **curiositystream:collection**
  - **CWTV**
+ - **DagelijkseKost**: dagelijksekost.een.be
  - **DailyMail**
  - **dailymotion**
  - **dailymotion:playlist**
@@ -226,6 +237,7 @@
  - **DiscoveryGo**
  - **DiscoveryGoPlaylist**
  - **DiscoveryNetworksDe**
+ - **DiscoveryPlus**
  - **DiscoveryVR**
  - **Disney**
  - **dlive:stream**
@@ -268,7 +280,6 @@
  - **ESPNArticle**
  - **EsriVideo**
  - **Europa**
- - **EveryonesMixtape**
  - **EWETV**
  - **ExpoTV**
  - **Expressen**
@@ -315,7 +326,6 @@
  - **Funk**
  - **Fusion**
  - **Fux**
- - **FXNetworks**
  - **Gaia**
  - **GameInformer**
  - **GameSpot**
@@ -323,6 +333,7 @@
  - **Gaskrank**
  - **Gazeta**
  - **GDCVault**
+ - **GediDigital**
  - **generic**: Generic downloader that works on some sites
  - **Gfycat**
  - **GiantBomb**
@@ -334,6 +345,8 @@
  - **Go**
  - **GodTube**
  - **Golem**
+ - **google:podcasts**
+ - **google:podcasts:feed**
  - **GoogleDrive**
  - **Goshgay**
  - **GPUTechConf**
@@ -346,8 +359,10 @@
  - **HentaiStigma**
  - **hetklokhuis**
  - **hgtv.com:show**
+ - **HGTVDe**
  - **HiDive**
  - **HistoricFilms**
+ - **history:player**
  - **history:topic**: History.com Topic
  - **hitbox**
  - **hitbox:live**
@@ -367,6 +382,10 @@
  - **HungamaSong**
  - **Hypem**
  - **ign.com**
+ - **IGNArticle**
+ - **IGNVideo**
+ - **IHeartRadio**
+ - **iheartradio:podcast**
  - **imdb**: Internet Movie Database trailers
  - **imdb:list**: Internet Movie Database lists
  - **Imgur**
@@ -400,14 +419,14 @@
  - **JWPlatform**
  - **Kakao**
  - **Kaltura**
- - **KanalPlay**: Kanal 5/9/11 Play
  - **Kankan**
  - **Karaoketv**
  - **KarriereVideos**
  - **Katsomo**
  - **KeezMovies**
  - **Ketnet**
- - **KhanAcademy**
+ - **khanacademy**
+ - **khanacademy:unit**
  - **KickStarter**
  - **KinjaEmbed**
  - **KinoPoisk**
@@ -445,14 +464,14 @@
  - **limelight**
  - **limelight:channel**
  - **limelight:channel_list**
+ - **LineLive**
+ - **LineLiveChannel**
  - **LineTV**
  - **linkedin:learning**
  - **linkedin:learning:course**
  - **LinuxAcademy**
  - **LiTV**
  - **LiveJournal**
- - **LiveLeak**
- - **LiveLeakEmbed**
  - **livestream**
  - **livestream:original**
  - **LnkGo**
@@ -470,6 +489,7 @@
  - **mangomolo:live**
  - **mangomolo:video**
  - **ManyVids**
+ - **MaoriTV**
  - **Markiza**
  - **MarkizaPage**
  - **massengeschmack.tv**
@@ -494,6 +514,9 @@
  - **Mgoon**
  - **MGTV**: 芒果TV
  - **MiaoPai**
+ - **minds**
+ - **minds:channel**
+ - **minds:group**
  - **MinistryGrid**
  - **Minoto**
  - **miomio.tv**
@@ -502,6 +525,7 @@
  - **mixcloud:playlist**
  - **mixcloud:user**
  - **MLB**
+ - **MLBVideo**
  - **Mnet**
  - **MNetTV**
  - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
@@ -523,6 +547,7 @@
  - **mtv:video**
  - **mtvjapan**
  - **mtvservices:embedded**
+ - **MTVUutisetArticle**
  - **MuenchenTV**: münchen.tv
  - **mva**: Microsoft Virtual Academy videos
  - **mva:course**: Microsoft Virtual Academy courses
@@ -541,6 +566,11 @@
  - **NationalGeographicTV**
  - **Naver**
  - **NBA**
+ - **nba:watch**
+ - **nba:watch:collection**
+ - **NBAChannel**
+ - **NBAEmbed**
+ - **NBAWatchEmbed**
  - **NBC**
  - **NBCNews**
  - **nbcolympics**
@@ -570,8 +600,10 @@
  - **NextTV**: 壹電視
  - **Nexx**
  - **NexxEmbed**
- - **nfl.com**
+ - **nfl.com** (Currently broken)
+ - **nfl.com:article** (Currently broken)
  - **NhkVod**
+ - **NhkVodProgram**
  - **nhl.com**
  - **nick.com**
  - **nick.de**
@@ -585,7 +617,6 @@
  - **njoy:embed**
  - **NJPWWorld**: 新日本プロレスワールド
  - **NobelPrize**
- - **Noco**
  - **NonkTube**
  - **Noovo**
  - **Normalboots**
@@ -603,6 +634,7 @@
  - **Npr**
  - **NRK**
  - **NRKPlaylist**
+ - **NRKRadioPodkast**
  - **NRKSkole**: NRK Skole
  - **NRKTV**: NRK TV and NRK Radio
  - **NRKTVDirekte**: NRK TV Direkte and NRK Radio Direkte
@@ -649,12 +681,14 @@
  - **OutsideTV**
  - **PacktPub**
  - **PacktPubCourse**
+ - **PalcoMP3:artist**
+ - **PalcoMP3:song**
+ - **PalcoMP3:video**
  - **pandora.tv**: 판도라TV
  - **ParamountNetwork**
  - **parliamentlive.tv**: UK parliament videos
  - **Patreon**
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
- - **pcmag**
  - **PearVideo**
  - **PeerTube**
  - **People**
@@ -676,13 +710,13 @@
  - **play.fm**
  - **player.sky.it**
  - **PlayPlusTV**
+ - **PlayStuff**
  - **PlaysTV**
  - **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
  - **Playvid**
  - **Playwire**
  - **pluralsight**
  - **pluralsight:course**
- - **plus.google**: Google Plus
  - **podomatic**
  - **Pokemon**
  - **PolskieRadio**
@@ -782,6 +816,7 @@
  - **safari:course**: safaribooksonline.com online courses
  - **SAKTV**
  - **SaltTV**
+ - **SampleFocus**
  - **Sapo**: SAPO Vídeos
  - **savefrom.net**
  - **SBS**: sbs.com.au
@@ -804,14 +839,18 @@
  - **ShahidShow**
  - **Shared**: shared.sx
  - **ShowRoomLive**
+ - **simplecast**
+ - **simplecast:episode**
+ - **simplecast:podcast**
  - **Sina**
  - **sky.it**
+ - **sky:news**
+ - **sky:sports**
+ - **sky:sports:news**
  - **skyacademy.it**
  - **SkylineWebcams**
- - **SkyNews**
  - **skynewsarabia:article**
  - **skynewsarabia:video**
- - **SkySports**
  - **Slideshare**
  - **SlidesLive**
  - **Slutload**
@@ -840,6 +879,8 @@
  - **Sport5**
  - **SportBox**
  - **SportDeutschland**
+ - **spotify**
+ - **spotify:show**
  - **Spreaker**
  - **SpreakerPage**
  - **SpreakerShow**
@@ -852,6 +893,10 @@
  - **stanfordoc**: Stanford Open ClassRoom
  - **Steam**
  - **Stitcher**
+ - **StitcherShow**
+ - **StoryFire**
+ - **StoryFireSeries**
+ - **StoryFireUser**
  - **Streamable**
  - **streamcloud.eu**
  - **StreamCZ**
@@ -872,7 +917,6 @@
  - **Tagesschau**
  - **tagesschau:player**
  - **Tass**
- - **TastyTrade**
  - **TBS**
  - **TDSLifeway**
  - **Teachable**
@@ -921,12 +965,13 @@
  - **TNAFlixNetworkEmbed**
  - **toggle**
  - **ToonGoggles**
- - **Tosh**: Tosh.0
  - **tou.tv**
  - **Toypics**: Toypics video
  - **ToypicsUser**: Toypics user profile
  - **TrailerAddict** (Currently broken)
  - **Trilulilu**
+ - **Trovo**
+ - **TrovoVod**
  - **TruNews**
  - **TruTV**
  - **Tube8**
@@ -946,6 +991,8 @@
  - **TV2DKBornholmPlay**
  - **TV4**: tv4.se and tv4play.se
  - **TV5MondePlus**: TV5MONDE+
+ - **tv5unis**
+ - **tv5unis:video**
  - **tv8.it**
  - **TVA**
  - **TVANouvelles**
@@ -1018,6 +1065,7 @@
  - **Vidbit**
  - **Viddler**
  - **Videa**
+ - **video.arnes.si**: Arnes Video
  - **video.google:search**: Google Video search
  - **video.sky.it**
  - **video.sky.it:live**
@@ -1032,7 +1080,6 @@
  - **vidme**
  - **vidme:user**
  - **vidme:user:likes**
- - **Vidzi**
  - **vier**: vier.be and vijf.be
  - **vier:videos**
  - **viewlift**
@@ -1077,10 +1124,12 @@
  - **vrv**
  - **vrv:series**
  - **VShare**
+ - **VTM**
  - **VTXTV**
  - **vube**: Vube.com
  - **VuClip**
  - **VVVVID**
+ - **VVVVIDShow**
  - **VyboryMos**
  - **Vzaar**
  - **Wakanim**
@@ -1103,6 +1152,7 @@
  - **WeiboMobile**
  - **WeiqiTV**: WQTV
  - **Wistia**
+ - **WistiaPlaylist**
  - **wnl**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
  - **WorldStarHipHop**
  - **WSJ**: Wall Street Journal
@@ -1110,7 +1160,7 @@
  - **WWE**
  - **XBef**
  - **XboxClips**
- - **XFileShare**: XFileShare based sites: ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, XVideoSharing
+ - **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, WolfStream, XVideoSharing
  - **XHamster**
  - **XHamsterEmbed**
  - **XHamsterUser**
@@ -1165,10 +1215,12 @@
  - **YoutubeYtBe**
  - **YoutubeYtUser**
  - **Zapiks**
- - **Zaq1**
  - **Zattoo**
  - **ZattooLive**
  - **ZDF**
  - **ZDFChannel**
+ - **Zhihu**
  - **zingmp3**: mp3.zing.vn
+ - **zingmp3:album**
+ - **zoom**
  - **Zype**

@@ -18,7 +18,6 @@
     "noprogress": false,
     "outtmpl": "%(id)s.%(ext)s",
     "password": null,
-    "playlistend": -1,
     "playliststart": 1,
     "prefer_free_formats": false,
     "quiet": false,

@@ -98,6 +98,55 @@ class TestInfoExtractor(unittest.TestCase):
         self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
         self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)
 
+    def test_search_json_ld_realworld(self):
+        # https://github.com/ytdl-org/youtube-dl/issues/23306
+        expect_dict(
+            self,
+            self.ie._search_json_ld(r'''<script type="application/ld+json">
+            {
+            "@context": "http://schema.org/",
+            "@type": "VideoObject",
+            "name": "1 On 1 With Kleio",
+            "url": "https://www.eporner.com/hd-porn/xN49A1cT3eB/1-On-1-With-Kleio/",
+            "duration": "PT0H12M23S",
+            "thumbnailUrl": ["https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg", "https://imggen.eporner.com/780814/1920/1080/9.jpg"],
+            "contentUrl": "https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4",
+            "embedUrl": "https://www.eporner.com/embed/xN49A1cT3eB/1-On-1-With-Kleio/",
+            "image": "https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg",
+            "width": "1920",
+            "height": "1080",
+            "encodingFormat": "mp4",
+            "bitrate": "6617kbps",
+            "isFamilyFriendly": "False",
+            "description": "Kleio Valentien",
+            "uploadDate": "2015-12-05T21:24:35+01:00",
+            "interactionStatistic": {
+            "@type": "InteractionCounter",
+            "interactionType": { "@type": "http://schema.org/WatchAction" },
+            "userInteractionCount": 1120958
+            }, "aggregateRating": {
+            "@type": "AggregateRating",
+            "ratingValue": "88",
+            "ratingCount": "630",
+            "bestRating": "100",
+            "worstRating": "0"
+            }, "actor": [{
+            "@type": "Person",
+            "name": "Kleio Valentien",
+            "url": "https://www.eporner.com/pornstar/kleio-valentien/"
+            }]}
+            </script>''', None),
+            {
+                'title': '1 On 1 With Kleio',
+                'description': 'Kleio Valentien',
+                'url': 'https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4',
+                'timestamp': 1449347075,
+                'duration': 743.0,
+                'view_count': 1120958,
+                'width': 1920,
+                'height': 1080,
+            })
+
     def test_download_json(self):
         uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
         self.assertEqual(self.ie._download_json(uri, None), {'foo': 'blah'})
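
The new test above feeds a raw JSON-LD block straight into _search_json_ld(). As a hedged sketch of how the same helper is typically used from within an extractor (the extractor name and URL pattern here are invented for illustration only):

    from youtube_dl.extractor.common import InfoExtractor


    class ExampleIE(InfoExtractor):  # hypothetical extractor, for illustration
        _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>\d+)'

        def _real_extract(self, url):
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)
            # expected_type restricts matching to VideoObject blocks like the one in the test
            info = self._search_json_ld(webpage, video_id, expected_type='VideoObject')
            info['id'] = video_id
            return info
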
@@ -464,6 +464,7 @@ class TestFormatSelection(unittest.TestCase):
         assert_syntax_error('+bestaudio')
         assert_syntax_error('bestvideo+')
         assert_syntax_error('/')
+        assert_syntax_error('bestvideo+bestvideo+bestaudio')
 
     def test_format_filtering(self):
         formats = [
@@ -632,13 +633,20 @@ class TestYoutubeDL(unittest.TestCase):
             'title2': '%PATH%',
         }
 
-        def fname(templ):
-            ydl = YoutubeDL({'outtmpl': templ})
+        def fname(templ, na_placeholder='NA'):
+            params = {'outtmpl': templ}
+            if na_placeholder != 'NA':
+                params['outtmpl_na_placeholder'] = na_placeholder
+            ydl = YoutubeDL(params)
             return ydl.prepare_filename(info)
         self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
         self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
-        # Replace missing fields with 'NA'
-        self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
+        NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(id)s.%(ext)s'
+        # Replace missing fields with 'NA' by default
+        self.assertEqual(fname(NA_TEST_OUTTMPL), 'NA-NA-1234.mp4')
+        # Or by provided placeholder
+        self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder='none'), 'none-none-1234.mp4')
+        self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder=''), '--1234.mp4')
         self.assertEqual(fname('%(height)d.%(ext)s'), '1080.mp4')
         self.assertEqual(fname('%(height)6d.%(ext)s'), ' 1080.mp4')
         self.assertEqual(fname('%(height)-6d.%(ext)s'), '1080 .mp4')
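
A note on the option exercised above: 'outtmpl_na_placeholder' substitutes a custom string for missing output-template fields, which otherwise render as 'NA'. A minimal hedged sketch, with an illustrative placeholder value:

    from youtube_dl import YoutubeDL

    # Missing fields in the output template normally become 'NA';
    # 'outtmpl_na_placeholder' (the parameter the test above drives) overrides that string.
    ydl = YoutubeDL({
        'outtmpl': '%(uploader_date)s-%(id)s.%(ext)s',
        'outtmpl_na_placeholder': 'unknown',
    })
    print(ydl.prepare_filename({'id': '1234', 'ext': 'mp4'}))  # -> 'unknown-1234.mp4'
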
@@ -989,6 +997,25 @@ class TestYoutubeDL(unittest.TestCase):
         self.assertEqual(downloaded['extractor'], 'Video')
         self.assertEqual(downloaded['extractor_key'], 'Video')
 
+    def test_default_times(self):
+        """Test addition of missing upload/release/_date from /release_/timestamp"""
+        info = {
+            'id': '1234',
+            'url': TEST_URL,
+            'title': 'Title',
+            'ext': 'mp4',
+            'timestamp': 1631352900,
+            'release_timestamp': 1632995931,
+        }
+
+        params = {'simulate': True, }
+        ydl = FakeYDL(params)
+        out_info = ydl.process_ie_result(info)
+        self.assertTrue(isinstance(out_info['upload_date'], compat_str))
+        self.assertEqual(out_info['upload_date'], '20210911')
+        self.assertTrue(isinstance(out_info['release_date'], compat_str))
+        self.assertEqual(out_info['release_date'], '20210930')
+
 
 if __name__ == '__main__':
     unittest.main()

@@ -36,7 +36,7 @@ class TestAllURLsMatching(unittest.TestCase):
         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
-        assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        assertTab('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
         assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
@@ -57,8 +57,8 @@ class TestAllURLsMatching(unittest.TestCase):
         assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')
         assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
 
-    # def test_youtube_user_matching(self):
-    #     self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab'])
+    def test_youtube_user_matching(self):
+        self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab'])
 
     def test_youtube_feeds(self):
         self.assertMatch('https://www.youtube.com/feed/library', ['youtube:tab'])
@@ -66,18 +66,9 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:tab'])
         self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:tab'])
 
-    # def test_youtube_search_matching(self):
-    #     self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
-    #     self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
+    def test_youtube_search_matching(self):
+        self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
+        self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
 
-    def test_youtube_extract(self):
-        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
-        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
-        assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
-        assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
-        assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
-        assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
-        assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
-
     def test_facebook_matching(self):
         self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))

@@ -33,6 +33,7 @@ from youtube_dl.compat import (
 from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
+    error_to_compat_str,
     format_bytes,
     UnavailableVideoError,
 )
@@ -100,27 +101,28 @@ def generator(test_case, tname):
 
         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
+            self.skipTest(reason)
 
         if not ie.working():
             print_skipping('IE marked as not _WORKING')
-            return
 
         for tc in test_cases:
             info_dict = tc.get('info_dict', {})
             if not (info_dict.get('id') and info_dict.get('ext')):
-                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+                raise Exception('Test definition (%s) requires both \'id\' and \'ext\' keys present to define the output file' % (tname, ))
 
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
-            return
         for other_ie in other_ies:
             if not other_ie.working():
                 print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
-                return
 
         params = get_params(test_case.get('params', {}))
         params['outtmpl'] = tname + '_' + params['outtmpl']
         if is_playlist and 'playlist' not in test_case:
             params.setdefault('extract_flat', 'in_playlist')
+            params.setdefault('playlistend', test_case.get('playlist_mincount'))
             params.setdefault('skip_download', True)
 
         ydl = YoutubeDL(params, auto_init=False)
@@ -160,7 +162,9 @@ def generator(test_case, tname):
         except (DownloadError, ExtractorError) as err:
             # Check if the exception is not a network related one
             if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
-                raise
+                msg = getattr(err, 'msg', error_to_compat_str(err))
+                err.msg = '%s (%s)' % (msg, tname, )
+                raise err
 
         if try_num == RETRIES:
             report_warning('%s failed due to network errors, skipping...' % tname)

@@ -39,6 +39,16 @@ class TestExecution(unittest.TestCase):
         _, stderr = p.communicate()
         self.assertFalse(stderr)
 
+    def test_lazy_extractors(self):
+        try:
+            subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'], cwd=rootDir, stdout=_DEV_NULL)
+            subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=_DEV_NULL)
+        finally:
+            try:
+                os.remove('youtube_dl/extractor/lazy_extractors.py')
+            except (IOError, OSError):
+                pass
+
 
 if __name__ == '__main__':
     unittest.main()

@@ -112,6 +112,72 @@ class TestJSInterpreter(unittest.TestCase):
         ''')
         self.assertEqual(jsi.call_function('z'), 5)
 
+    def test_for_loop(self):
+        # function x() { a=0; for (i=0; i-10; i++) {a++} a }
+        jsi = JSInterpreter('''
+        function x() { a=0; for (i=0; i-10; i = i + 1) {a++} a }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 10)
+
+    def test_switch(self):
+        jsi = JSInterpreter('''
+        function x(f) { switch(f){
+            case 1:f+=1;
+            case 2:f+=2;
+            case 3:f+=3;break;
+            case 4:f+=4;
+            default:f=0;
+        } return f }
+        ''')
+        self.assertEqual(jsi.call_function('x', 1), 7)
+        self.assertEqual(jsi.call_function('x', 3), 6)
+        self.assertEqual(jsi.call_function('x', 5), 0)
+
+    def test_switch_default(self):
+        jsi = JSInterpreter('''
+        function x(f) { switch(f){
+            case 2: f+=2;
+            default: f-=1;
+            case 5:
+            case 6: f+=6;
+            case 0: break;
+            case 1: f+=1;
+        } return f }
+        ''')
+        self.assertEqual(jsi.call_function('x', 1), 2)
+        self.assertEqual(jsi.call_function('x', 5), 11)
+        self.assertEqual(jsi.call_function('x', 9), 14)
+
+    def test_try(self):
+        jsi = JSInterpreter('''
+        function x() { try{return 10} catch(e){return 5} }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 10)
+
+    def test_for_loop_continue(self):
+        jsi = JSInterpreter('''
+        function x() { a=0; for (i=0; i-10; i++) { continue; a++ } a }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 0)
+
+    def test_for_loop_break(self):
+        jsi = JSInterpreter('''
+        function x() { a=0; for (i=0; i-10; i++) { break; a++ } a }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 0)
+
+    def test_literal_list(self):
+        jsi = JSInterpreter('''
+        function x() { [1, 2, "asdf", [5, 6, 7]][3] }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [5, 6, 7])
+
+    def test_comma(self):
+        jsi = JSInterpreter('''
+        function x() { a=5; a -= 1, a+=3; return a }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 7)
+
 
 if __name__ == '__main__':
     unittest.main()
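
For reference, the interpreter these new tests target can be driven directly. A minimal hedged sketch mirroring the for-loop case above (the import path is the one the test module itself uses):

    from youtube_dl.jsinterp import JSInterpreter

    # Same construct as test_for_loop: the trailing bare expression 'a' is the value
    # that call_function() returns once the loop finishes.
    jsi = JSInterpreter('function x() { a=0; for (i=0; i-10; i = i + 1) {a++} a }')
    print(jsi.call_function('x'))  # -> 10
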
@@ -38,6 +38,9 @@ class BaseTestSubtitles(unittest.TestCase):
         self.DL = FakeYDL()
         self.ie = self.IE()
         self.DL.add_info_extractor(self.ie)
+        if not self.IE.working():
+            print('Skipping: %s marked as not _WORKING' % self.IE.ie_key())
+            self.skipTest('IE marked as not _WORKING')
 
     def getInfoDict(self):
         info_dict = self.DL.extract_info(self.url, download=False)
@@ -56,6 +59,21 @@ class BaseTestSubtitles(unittest.TestCase):
 
 
 class TestYoutubeSubtitles(BaseTestSubtitles):
+    # Available subtitles for QRS8MkLhQmM:
+    # Language formats
+    # ru vtt, ttml, srv3, srv2, srv1, json3
+    # fr vtt, ttml, srv3, srv2, srv1, json3
+    # en vtt, ttml, srv3, srv2, srv1, json3
+    # nl vtt, ttml, srv3, srv2, srv1, json3
+    # de vtt, ttml, srv3, srv2, srv1, json3
+    # ko vtt, ttml, srv3, srv2, srv1, json3
+    # it vtt, ttml, srv3, srv2, srv1, json3
+    # zh-Hant vtt, ttml, srv3, srv2, srv1, json3
+    # hi vtt, ttml, srv3, srv2, srv1, json3
+    # pt-BR vtt, ttml, srv3, srv2, srv1, json3
+    # es-MX vtt, ttml, srv3, srv2, srv1, json3
+    # ja vtt, ttml, srv3, srv2, srv1, json3
+    # pl vtt, ttml, srv3, srv2, srv1, json3
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE
 
@@ -64,41 +82,60 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(len(subtitles.keys()), 13)
-        self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
-        self.assertEqual(md5(subtitles['it']), '6d752b98c31f1cf8d597050c7a2cb4b5')
+        self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
+        self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9')
         for lang in ['fr', 'de']:
             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
-    def test_youtube_subtitles_ttml_format(self):
+    def _test_subtitles_format(self, fmt, md5_hash, lang='en'):
         self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitlesformat'] = 'ttml'
+        self.DL.params['subtitlesformat'] = fmt
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), 'e306f8c42842f723447d9f63ad65df54')
+        self.assertEqual(md5(subtitles[lang]), md5_hash)
+
+    def test_youtube_subtitles_ttml_format(self):
+        self._test_subtitles_format('ttml', 'c97ddf1217390906fa9fbd34901f3da2')
 
     def test_youtube_subtitles_vtt_format(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitlesformat'] = 'vtt'
+        self._test_subtitles_format('vtt', 'ae1bd34126571a77aabd4d276b28044d')
+
+    def test_youtube_subtitles_json3_format(self):
+        self._test_subtitles_format('json3', '688dd1ce0981683867e7fe6fde2a224b')
+
+    def _test_automatic_captions(self, url, lang):
+        self.url = url
+        self.DL.params['writeautomaticsub'] = True
+        self.DL.params['subtitleslangs'] = [lang]
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
+        self.assertTrue(subtitles[lang] is not None)
 
     def test_youtube_automatic_captions(self):
-        self.url = '8YoUxe5ncPo'
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslangs'] = ['it']
-        subtitles = self.getSubtitles()
-        self.assertTrue(subtitles['it'] is not None)
+        # Available automatic captions for 8YoUxe5ncPo:
+        # Language formats (all in vtt, ttml, srv3, srv2, srv1, json3)
+        # gu, zh-Hans, zh-Hant, gd, ga, gl, lb, la, lo, tt, tr,
+        # lv, lt, tk, th, tg, te, fil, haw, yi, ceb, yo, de, da,
+        # el, eo, en, eu, et, es, ru, rw, ro, bn, be, bg, uk, jv,
+        # bs, ja, or, xh, co, ca, cy, cs, ps, pt, pa, vi, pl, hy,
+        # hr, ht, hu, hmn, hi, ha, mg, uz, ml, mn, mi, mk, ur,
+        # mt, ms, mr, ug, ta, my, af, sw, is, am,
+        # *it*, iw, sv, ar,
+        # su, zu, az, id, ig, nl, no, ne, ny, fr, ku, fy, fa, fi,
+        # ka, kk, sr, sq, ko, kn, km, st, sk, si, so, sn, sm, sl,
+        # ky, sd
+        # ...
+        self._test_automatic_captions('8YoUxe5ncPo', 'it')
 
+    @unittest.skip('ASR subs all in all supported langs now')
     def test_youtube_translated_subtitles(self):
-        # This video has a subtitles track, which can be translated
-        self.url = 'Ky9eprVWzlI'
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslangs'] = ['it']
-        subtitles = self.getSubtitles()
-        self.assertTrue(subtitles['it'] is not None)
+        # This video has a subtitles track, which can be translated (#4555)
+        self._test_automatic_captions('Ky9eprVWzlI', 'it')
 
     def test_youtube_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
-        self.url = 'n5BB19UTcdA'
+        # Available automatic captions for 8YoUxe5ncPo:
+        # ...
+        # 8YoUxe5ncPo has no subtitles
+        self.url = '8YoUxe5ncPo'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -128,6 +165,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestTedSubtitles(BaseTestSubtitles):
     url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
     IE = TEDIE
@@ -152,18 +190,19 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
-        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
-        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
+        self.assertEqual(md5(subtitles['en']), '386cbc9320b94e25cb364b97935e5dd1')
+        self.assertEqual(md5(subtitles['fr']), 'c9b69eef35bc6641c0d4da8a04f9dfac')
 
     def test_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
-        self.url = 'http://vimeo.com/56015672'
+        self.url = 'http://vimeo.com/68093876'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestWallaSubtitles(BaseTestSubtitles):
     url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
     IE = WallaIE
@@ -185,6 +224,7 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
     url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
     IE = CeskaTelevizeIE
@@ -206,6 +246,7 @@ class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestLyndaSubtitles(BaseTestSubtitles):
     url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
     IE = LyndaIE
@@ -218,6 +259,7 @@ class TestLyndaSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
 
 
+@unittest.skip('IE broken')
 class TestNPOSubtitles(BaseTestSubtitles):
     url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
     IE = NPOIE
@@ -230,6 +272,7 @@ class TestNPOSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
 
 
+@unittest.skip('IE broken')
 class TestMTVSubtitles(BaseTestSubtitles):
     url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans'
     IE = ComedyCentralIE
@@ -253,22 +296,31 @@ class TestNRKSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['no']))
-        self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')
+        self.assertEqual(set(subtitles.keys()), set(['nb-ttv']))
+        self.assertEqual(md5(subtitles['nb-ttv']), '67e06ff02d0deaf975e68f6cb8f6a149')
 
 
 class TestRaiPlaySubtitles(BaseTestSubtitles):
-    url = 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
     IE = RaiPlayIE
 
-    def test_allsubtitles(self):
+    def test_subtitles_key(self):
+        self.url = 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['it']))
         self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
 
+    def test_subtitles_array_key(self):
+        self.url = 'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['it']))
+        self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd')
+
 
+@unittest.skip('IE broken - DRM only')
 class TestVikiSubtitles(BaseTestSubtitles):
     url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
     IE = VikiIE
@@ -295,6 +347,7 @@ class TestThePlatformSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
 
 
+@unittest.skip('IE broken')
 class TestThePlatformFeedSubtitles(BaseTestSubtitles):
     url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
     IE = ThePlatformFeedIE
@@ -330,7 +383,7 @@ class TestDemocracynowSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['en']))
-        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+        self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045')
 
     def test_subtitles_in_page(self):
         self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
@@ -338,7 +391,7 @@ class TestDemocracynowSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['en']))
-        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+        self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045')
 
 
 if __name__ == '__main__':

@@ -21,6 +21,7 @@ from youtube_dl.utils import (
     encode_base_n,
     caesar,
     clean_html,
+    clean_podcast_url,
     date_from_str,
     DateRange,
     detect_exe_version,
@@ -554,6 +555,11 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(url_or_none('http$://foo.de'), None)
         self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
         self.assertEqual(url_or_none('//foo.de'), '//foo.de')
+        self.assertEqual(url_or_none('s3://foo.de'), None)
+        self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
+        self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
+        self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
+        self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')

     def test_parse_age_limit(self):
         self.assertEqual(parse_age_limit(None), None)
@@ -1465,6 +1471,10 @@ Line 1
         self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
         self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])

+    def test_clean_podcast_url(self):
+        self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
+        self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
+


 if __name__ == '__main__':
     unittest.main()
@@ -1,275 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
# Allow direct execution
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import unittest
|
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
||||||
|
|
||||||
from test.helper import expect_value
|
|
||||||
from youtube_dl.extractor import YoutubeIE
|
|
||||||
|
|
||||||
|
|
||||||
class TestYoutubeChapters(unittest.TestCase):
|
|
||||||
|
|
||||||
_TEST_CASES = [
|
|
||||||
(
|
|
||||||
# https://www.youtube.com/watch?v=A22oy8dFjqc
|
|
||||||
# pattern: 00:00 - <title>
|
|
||||||
'''This is the absolute ULTIMATE experience of Queen's set at LIVE AID, this is the best video mixed to the absolutely superior stereo radio broadcast. This vastly superior audio mix takes a huge dump on all of the official mixes. Best viewed in 1080p. ENJOY! ***MAKE SURE TO READ THE DESCRIPTION***<br /><a href="#" onclick="yt.www.watch.player.seekTo(00*60+36);return false;">00:36</a> - Bohemian Rhapsody<br /><a href="#" onclick="yt.www.watch.player.seekTo(02*60+42);return false;">02:42</a> - Radio Ga Ga<br /><a href="#" onclick="yt.www.watch.player.seekTo(06*60+53);return false;">06:53</a> - Ay Oh!<br /><a href="#" onclick="yt.www.watch.player.seekTo(07*60+34);return false;">07:34</a> - Hammer To Fall<br /><a href="#" onclick="yt.www.watch.player.seekTo(12*60+08);return false;">12:08</a> - Crazy Little Thing Called Love<br /><a href="#" onclick="yt.www.watch.player.seekTo(16*60+03);return false;">16:03</a> - We Will Rock You<br /><a href="#" onclick="yt.www.watch.player.seekTo(17*60+18);return false;">17:18</a> - We Are The Champions<br /><a href="#" onclick="yt.www.watch.player.seekTo(21*60+12);return false;">21:12</a> - Is This The World We Created...?<br /><br />Short song analysis:<br /><br />- "Bohemian Rhapsody": Although it's a short medley version, it's one of the best performances of the ballad section, with Freddie nailing the Bb4s with the correct studio phrasing (for the first time ever!).<br /><br />- "Radio Ga Ga": Although it's missing one chorus, this is one of - if not the best - the best versions ever, Freddie nails all the Bb4s and sounds very clean! Spike Edney's Roland Jupiter 8 also really shines through on this mix, compared to the DVD releases!<br /><br />- "Audience Improv": A great improv, Freddie sounds strong and confident. You gotta love when he sustains that A4 for 4 seconds!<br /><br />- "Hammer To Fall": Despite missing a verse and a chorus, it's a strong version (possibly the best ever). Freddie sings the song amazingly, and even ad-libs a C#5 and a C5! Also notice how heavy Brian's guitar sounds compared to the thin DVD mixes - it roars!<br /><br />- "Crazy Little Thing Called Love": A great version, the crowd loves the song, the jam is great as well! Only downside to this is the slight feedback issues.<br /><br />- "We Will Rock You": Although cut down to the 1st verse and chorus, Freddie sounds strong. He nails the A4, and the solo from Dr. May is brilliant!<br /><br />- "We Are the Champions": Perhaps the high-light of the performance - Freddie is very daring on this version, he sustains the pre-chorus Bb4s, nails the 1st C5, belts great A4s, but most importantly: He nails the chorus Bb4s, in all 3 choruses! This is the only time he has ever done so! It has to be said though, the last one sounds a bit rough, but that's a side effect of belting high notes for the past 18 minutes, with nodules AND laryngitis!<br /><br />- "Is This The World We Created... ?": Freddie and Brian perform a beautiful version of this, and it is one of the best versions ever. It's both sad and hilarious that a couple of BBC engineers are talking over the song, one of them being completely oblivious of the fact that he is interrupting the performance, on live television... 
Which was being televised to almost 2 billion homes.<br /><br /><br />All rights go to their respective owners!<br />-----Copyright Disclaimer Under Section 107 of the Copyright Act 1976, allowance is made for fair use for purposes such as criticism, comment, news reporting, teaching, scholarship, and research. Fair use is a use permitted by copyright statute that might otherwise be infringing. Non-profit, educational or personal use tips the balance in favor of fair use''',
|
|
||||||
1477,
|
|
||||||
[{
|
|
||||||
'start_time': 36,
|
|
||||||
'end_time': 162,
|
|
||||||
'title': 'Bohemian Rhapsody',
|
|
||||||
}, {
|
|
||||||
'start_time': 162,
|
|
||||||
'end_time': 413,
|
|
||||||
'title': 'Radio Ga Ga',
|
|
||||||
}, {
|
|
||||||
'start_time': 413,
|
|
||||||
'end_time': 454,
|
|
||||||
'title': 'Ay Oh!',
|
|
||||||
}, {
|
|
||||||
'start_time': 454,
|
|
||||||
'end_time': 728,
|
|
||||||
'title': 'Hammer To Fall',
|
|
||||||
}, {
|
|
||||||
'start_time': 728,
|
|
||||||
'end_time': 963,
|
|
||||||
'title': 'Crazy Little Thing Called Love',
|
|
||||||
}, {
|
|
||||||
'start_time': 963,
|
|
||||||
'end_time': 1038,
|
|
||||||
'title': 'We Will Rock You',
|
|
||||||
}, {
|
|
||||||
'start_time': 1038,
|
|
||||||
'end_time': 1272,
|
|
||||||
'title': 'We Are The Champions',
|
|
||||||
}, {
|
|
||||||
'start_time': 1272,
|
|
||||||
'end_time': 1477,
|
|
||||||
'title': 'Is This The World We Created...?',
|
|
||||||
}]
|
|
||||||
),
|
|
||||||
(
|
|
||||||
# https://www.youtube.com/watch?v=ekYlRhALiRQ
|
|
||||||
# pattern: <num>. <title> 0:00
|
|
||||||
'1. Those Beaten Paths of Confusion <a href="#" onclick="yt.www.watch.player.seekTo(0*60+00);return false;">0:00</a><br />2. Beyond the Shadows of Emptiness & Nothingness <a href="#" onclick="yt.www.watch.player.seekTo(11*60+47);return false;">11:47</a><br />3. Poison Yourself...With Thought <a href="#" onclick="yt.www.watch.player.seekTo(26*60+30);return false;">26:30</a><br />4. The Agents of Transformation <a href="#" onclick="yt.www.watch.player.seekTo(35*60+57);return false;">35:57</a><br />5. Drowning in the Pain of Consciousness <a href="#" onclick="yt.www.watch.player.seekTo(44*60+32);return false;">44:32</a><br />6. Deny the Disease of Life <a href="#" onclick="yt.www.watch.player.seekTo(53*60+07);return false;">53:07</a><br /><br />More info/Buy: http://crepusculonegro.storenvy.com/products/257645-cn-03-arizmenda-within-the-vacuum-of-infinity<br /><br />No copyright is intended. The rights to this video are assumed by the owner and its affiliates.',
|
|
||||||
4009,
|
|
||||||
[{
|
|
||||||
'start_time': 0,
|
|
||||||
'end_time': 707,
|
|
||||||
'title': '1. Those Beaten Paths of Confusion',
|
|
||||||
}, {
|
|
||||||
'start_time': 707,
|
|
||||||
'end_time': 1590,
|
|
||||||
'title': '2. Beyond the Shadows of Emptiness & Nothingness',
|
|
||||||
}, {
|
|
||||||
'start_time': 1590,
|
|
||||||
'end_time': 2157,
|
|
||||||
'title': '3. Poison Yourself...With Thought',
|
|
||||||
}, {
|
|
||||||
'start_time': 2157,
|
|
||||||
'end_time': 2672,
|
|
||||||
'title': '4. The Agents of Transformation',
|
|
||||||
}, {
|
|
||||||
'start_time': 2672,
|
|
||||||
'end_time': 3187,
|
|
||||||
'title': '5. Drowning in the Pain of Consciousness',
|
|
||||||
}, {
|
|
||||||
'start_time': 3187,
|
|
||||||
'end_time': 4009,
|
|
||||||
'title': '6. Deny the Disease of Life',
|
|
||||||
}]
|
|
||||||
),
|
|
||||||
(
|
|
||||||
# https://www.youtube.com/watch?v=WjL4pSzog9w
|
|
||||||
# pattern: 00:00 <title>
|
|
||||||
'<a href="https://arizmenda.bandcamp.com/merch/despairs-depths-descended-cd" class="yt-uix-servicelink " data-target-new-window="True" data-servicelink="CDAQ6TgYACITCNf1raqT2dMCFdRjGAod_o0CBSj4HQ" data-url="https://arizmenda.bandcamp.com/merch/despairs-depths-descended-cd" rel="nofollow noopener" target="_blank">https://arizmenda.bandcamp.com/merch/...</a><br /><br /><a href="#" onclick="yt.www.watch.player.seekTo(00*60+00);return false;">00:00</a> Christening Unborn Deformities <br /><a href="#" onclick="yt.www.watch.player.seekTo(07*60+08);return false;">07:08</a> Taste of Purity<br /><a href="#" onclick="yt.www.watch.player.seekTo(16*60+16);return false;">16:16</a> Sculpting Sins of a Universal Tongue<br /><a href="#" onclick="yt.www.watch.player.seekTo(24*60+45);return false;">24:45</a> Birth<br /><a href="#" onclick="yt.www.watch.player.seekTo(31*60+24);return false;">31:24</a> Neves<br /><a href="#" onclick="yt.www.watch.player.seekTo(37*60+55);return false;">37:55</a> Libations in Limbo',
|
|
||||||
2705,
|
|
||||||
[{
|
|
||||||
'start_time': 0,
|
|
||||||
'end_time': 428,
|
|
||||||
'title': 'Christening Unborn Deformities',
|
|
||||||
}, {
|
|
||||||
'start_time': 428,
|
|
||||||
'end_time': 976,
|
|
||||||
'title': 'Taste of Purity',
|
|
||||||
}, {
|
|
||||||
'start_time': 976,
|
|
||||||
'end_time': 1485,
|
|
||||||
'title': 'Sculpting Sins of a Universal Tongue',
|
|
||||||
}, {
|
|
||||||
'start_time': 1485,
|
|
||||||
'end_time': 1884,
|
|
||||||
'title': 'Birth',
|
|
||||||
}, {
|
|
||||||
'start_time': 1884,
|
|
||||||
'end_time': 2275,
|
|
||||||
'title': 'Neves',
|
|
||||||
}, {
|
|
||||||
'start_time': 2275,
|
|
||||||
'end_time': 2705,
|
|
||||||
'title': 'Libations in Limbo',
|
|
||||||
}]
|
|
||||||
),
|
|
||||||
(
|
|
||||||
# https://www.youtube.com/watch?v=o3r1sn-t3is
|
|
||||||
# pattern: <title> 00:00 <note>
|
|
||||||
'Download this show in MP3: <a href="http://sh.st/njZKK" class="yt-uix-servicelink " data-url="http://sh.st/njZKK" data-target-new-window="True" data-servicelink="CDAQ6TgYACITCK3j8_6o2dMCFVDCGAoduVAKKij4HQ" rel="nofollow noopener" target="_blank">http://sh.st/njZKK</a><br /><br />Setlist:<br />I-E-A-I-A-I-O <a href="#" onclick="yt.www.watch.player.seekTo(00*60+45);return false;">00:45</a><br />Suite-Pee <a href="#" onclick="yt.www.watch.player.seekTo(4*60+26);return false;">4:26</a> (Incomplete)<br />Attack <a href="#" onclick="yt.www.watch.player.seekTo(5*60+31);return false;">5:31</a> (First live performance since 2011)<br />Prison Song <a href="#" onclick="yt.www.watch.player.seekTo(8*60+42);return false;">8:42</a><br />Know <a href="#" onclick="yt.www.watch.player.seekTo(12*60+32);return false;">12:32</a> (First live performance since 2011)<br />Aerials <a href="#" onclick="yt.www.watch.player.seekTo(15*60+32);return false;">15:32</a><br />Soldier Side - Intro <a href="#" onclick="yt.www.watch.player.seekTo(19*60+13);return false;">19:13</a><br />B.Y.O.B. <a href="#" onclick="yt.www.watch.player.seekTo(20*60+09);return false;">20:09</a><br />Soil <a href="#" onclick="yt.www.watch.player.seekTo(24*60+32);return false;">24:32</a><br />Darts <a href="#" onclick="yt.www.watch.player.seekTo(27*60+48);return false;">27:48</a><br />Radio/Video <a href="#" onclick="yt.www.watch.player.seekTo(30*60+38);return false;">30:38</a><br />Hypnotize <a href="#" onclick="yt.www.watch.player.seekTo(35*60+05);return false;">35:05</a><br />Temper <a href="#" onclick="yt.www.watch.player.seekTo(38*60+08);return false;">38:08</a> (First live performance since 1999)<br />CUBErt <a href="#" onclick="yt.www.watch.player.seekTo(41*60+00);return false;">41:00</a><br />Needles <a href="#" onclick="yt.www.watch.player.seekTo(42*60+57);return false;">42:57</a><br />Deer Dance <a href="#" onclick="yt.www.watch.player.seekTo(46*60+27);return false;">46:27</a><br />Bounce <a href="#" onclick="yt.www.watch.player.seekTo(49*60+38);return false;">49:38</a><br />Suggestions <a href="#" onclick="yt.www.watch.player.seekTo(51*60+25);return false;">51:25</a><br />Psycho <a href="#" onclick="yt.www.watch.player.seekTo(53*60+52);return false;">53:52</a><br />Chop Suey! <a href="#" onclick="yt.www.watch.player.seekTo(58*60+13);return false;">58:13</a><br />Lonely Day <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+01*60+15);return false;">1:01:15</a><br />Question! <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+04*60+14);return false;">1:04:14</a><br />Lost in Hollywood <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+08*60+10);return false;">1:08:10</a><br />Vicinity of Obscenity <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+13*60+40);return false;">1:13:40</a>(First live performance since 2012)<br />Forest <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+16*60+17);return false;">1:16:17</a><br />Cigaro <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+20*60+02);return false;">1:20:02</a><br />Toxicity <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+23*60+57);return false;">1:23:57</a>(with Chino Moreno)<br />Sugar <a href="#" onclick="yt.www.watch.player.seekTo(1*3600+27*60+53);return false;">1:27:53</a>',
|
|
||||||
5640,
|
|
||||||
[{
|
|
||||||
'start_time': 45,
|
|
||||||
'end_time': 266,
|
|
||||||
'title': 'I-E-A-I-A-I-O',
|
|
||||||
}, {
|
|
||||||
'start_time': 266,
|
|
||||||
'end_time': 331,
|
|
||||||
'title': 'Suite-Pee (Incomplete)',
|
|
||||||
}, {
|
|
||||||
'start_time': 331,
|
|
||||||
'end_time': 522,
|
|
||||||
'title': 'Attack (First live performance since 2011)',
|
|
||||||
}, {
|
|
||||||
'start_time': 522,
|
|
||||||
'end_time': 752,
|
|
||||||
'title': 'Prison Song',
|
|
||||||
}, {
|
|
||||||
'start_time': 752,
|
|
||||||
'end_time': 932,
|
|
||||||
'title': 'Know (First live performance since 2011)',
|
|
||||||
}, {
|
|
||||||
'start_time': 932,
|
|
||||||
'end_time': 1153,
|
|
||||||
'title': 'Aerials',
|
|
||||||
}, {
|
|
||||||
'start_time': 1153,
|
|
||||||
'end_time': 1209,
|
|
||||||
'title': 'Soldier Side - Intro',
|
|
||||||
}, {
|
|
||||||
'start_time': 1209,
|
|
||||||
'end_time': 1472,
|
|
||||||
'title': 'B.Y.O.B.',
|
|
||||||
}, {
|
|
||||||
'start_time': 1472,
|
|
||||||
'end_time': 1668,
|
|
||||||
'title': 'Soil',
|
|
||||||
}, {
|
|
||||||
'start_time': 1668,
|
|
||||||
'end_time': 1838,
|
|
||||||
'title': 'Darts',
|
|
||||||
}, {
|
|
||||||
'start_time': 1838,
|
|
||||||
'end_time': 2105,
|
|
||||||
'title': 'Radio/Video',
|
|
||||||
}, {
|
|
||||||
'start_time': 2105,
|
|
||||||
'end_time': 2288,
|
|
||||||
'title': 'Hypnotize',
|
|
||||||
}, {
|
|
||||||
'start_time': 2288,
|
|
||||||
'end_time': 2460,
|
|
||||||
'title': 'Temper (First live performance since 1999)',
|
|
||||||
}, {
|
|
||||||
'start_time': 2460,
|
|
||||||
'end_time': 2577,
|
|
||||||
'title': 'CUBErt',
|
|
||||||
}, {
|
|
||||||
'start_time': 2577,
|
|
||||||
'end_time': 2787,
|
|
||||||
'title': 'Needles',
|
|
||||||
}, {
|
|
||||||
'start_time': 2787,
|
|
||||||
'end_time': 2978,
|
|
||||||
'title': 'Deer Dance',
|
|
||||||
}, {
|
|
||||||
'start_time': 2978,
|
|
||||||
'end_time': 3085,
|
|
||||||
'title': 'Bounce',
|
|
||||||
}, {
|
|
||||||
'start_time': 3085,
|
|
||||||
'end_time': 3232,
|
|
||||||
'title': 'Suggestions',
|
|
||||||
}, {
|
|
||||||
'start_time': 3232,
|
|
||||||
'end_time': 3493,
|
|
||||||
'title': 'Psycho',
|
|
||||||
}, {
|
|
||||||
'start_time': 3493,
|
|
||||||
'end_time': 3675,
|
|
||||||
'title': 'Chop Suey!',
|
|
||||||
}, {
|
|
||||||
'start_time': 3675,
|
|
||||||
'end_time': 3854,
|
|
||||||
'title': 'Lonely Day',
|
|
||||||
}, {
|
|
||||||
'start_time': 3854,
|
|
||||||
'end_time': 4090,
|
|
||||||
'title': 'Question!',
|
|
||||||
}, {
|
|
||||||
'start_time': 4090,
|
|
||||||
'end_time': 4420,
|
|
||||||
'title': 'Lost in Hollywood',
|
|
||||||
}, {
|
|
||||||
'start_time': 4420,
|
|
||||||
'end_time': 4577,
|
|
||||||
'title': 'Vicinity of Obscenity (First live performance since 2012)',
|
|
||||||
}, {
|
|
||||||
'start_time': 4577,
|
|
||||||
'end_time': 4802,
|
|
||||||
'title': 'Forest',
|
|
||||||
}, {
|
|
||||||
'start_time': 4802,
|
|
||||||
'end_time': 5037,
|
|
||||||
'title': 'Cigaro',
|
|
||||||
}, {
|
|
||||||
'start_time': 5037,
|
|
||||||
'end_time': 5273,
|
|
||||||
'title': 'Toxicity (with Chino Moreno)',
|
|
||||||
}, {
|
|
||||||
'start_time': 5273,
|
|
||||||
'end_time': 5640,
|
|
||||||
'title': 'Sugar',
|
|
||||||
}]
|
|
||||||
),
|
|
||||||
(
|
|
||||||
# https://www.youtube.com/watch?v=PkYLQbsqCE8
|
|
||||||
# pattern: <num> - <title> [<latinized title>] 0:00:00
|
|
||||||
'''Затемно (Zatemno) is an Obscure Black Metal Band from Russia.<br /><br />"Во прах (Vo prakh)'' Into The Ashes", Debut mini-album released may 6, 2016, by Death Knell Productions<br />Released on 6 panel digipak CD, limited to 100 copies only<br />And digital format on Bandcamp<br /><br />Tracklist<br /><br />1 - Во прах [Vo prakh] <a href="#" onclick="yt.www.watch.player.seekTo(0*3600+00*60+00);return false;">0:00:00</a><br />2 - Искупление [Iskupleniye] <a href="#" onclick="yt.www.watch.player.seekTo(0*3600+08*60+10);return false;">0:08:10</a><br />3 - Из серпов луны...[Iz serpov luny] <a href="#" onclick="yt.www.watch.player.seekTo(0*3600+14*60+30);return false;">0:14:30</a><br /><br />Links:<br /><a href="https://deathknellprod.bandcamp.com/album/--2" class="yt-uix-servicelink " data-target-new-window="True" data-url="https://deathknellprod.bandcamp.com/album/--2" data-servicelink="CC8Q6TgYACITCNP234Kr2dMCFcNxGAodQqsIwSj4HQ" target="_blank" rel="nofollow noopener">https://deathknellprod.bandcamp.com/a...</a><br /><a href="https://www.facebook.com/DeathKnellProd/" class="yt-uix-servicelink " data-target-new-window="True" data-url="https://www.facebook.com/DeathKnellProd/" data-servicelink="CC8Q6TgYACITCNP234Kr2dMCFcNxGAodQqsIwSj4HQ" target="_blank" rel="nofollow noopener">https://www.facebook.com/DeathKnellProd/</a><br /><br /><br />I don't have any right about this artifact, my only intention is to spread the music of the band, all rights are reserved to the Затемно (Zatemno) and his producers, Death Knell Productions.<br /><br />------------------------------------------------------------------<br /><br />Subscribe for more videos like this.<br />My link: <a href="https://web.facebook.com/AttackOfTheDragons" class="yt-uix-servicelink " data-target-new-window="True" data-url="https://web.facebook.com/AttackOfTheDragons" data-servicelink="CC8Q6TgYACITCNP234Kr2dMCFcNxGAodQqsIwSj4HQ" target="_blank" rel="nofollow noopener">https://web.facebook.com/AttackOfTheD...</a>''',
|
|
||||||
1138,
|
|
||||||
[{
|
|
||||||
'start_time': 0,
|
|
||||||
'end_time': 490,
|
|
||||||
'title': '1 - Во прах [Vo prakh]',
|
|
||||||
}, {
|
|
||||||
'start_time': 490,
|
|
||||||
'end_time': 870,
|
|
||||||
'title': '2 - Искупление [Iskupleniye]',
|
|
||||||
}, {
|
|
||||||
'start_time': 870,
|
|
||||||
'end_time': 1138,
|
|
||||||
'title': '3 - Из серпов луны...[Iz serpov luny]',
|
|
||||||
}]
|
|
||||||
),
|
|
||||||
(
|
|
||||||
# https://www.youtube.com/watch?v=xZW70zEasOk
|
|
||||||
# time point more than duration
|
|
||||||
'''● LCS Spring finals: Saturday and Sunday from <a href="#" onclick="yt.www.watch.player.seekTo(13*60+30);return false;">13:30</a> outside the venue! <br />● PAX East: Fri, Sat & Sun - more info in tomorrows video on the main channel!''',
|
|
||||||
283,
|
|
||||||
[]
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
def test_youtube_chapters(self):
|
|
||||||
for description, duration, expected_chapters in self._TEST_CASES:
|
|
||||||
ie = YoutubeIE()
|
|
||||||
expect_value(
|
|
||||||
self, ie._extract_chapters_from_description(description, duration),
|
|
||||||
expected_chapters, None)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
 from __future__ import unicode_literals

 # Allow direct execution
@@ -9,10 +10,10 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from test.helper import FakeYDL


 from youtube_dl.extractor import (
-    YoutubePlaylistIE,
     YoutubeIE,
+    YoutubePlaylistIE,
+    YoutubeTabIE,
 )


@@ -24,47 +25,40 @@ class TestYoutubeLists(unittest.TestCase):
     def test_youtube_playlist_noplaylist(self):
         dl = FakeYDL()
         dl.params['noplaylist'] = True
+        dl.params['format'] = 'best'
         ie = YoutubePlaylistIE(dl)
         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
         self.assertEqual(result['_type'], 'url')
+        result = dl.extract_info(result['url'], download=False, ie_key=result.get('ie_key'), process=False)
         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

-    def test_youtube_course(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        # TODO find a > 100 (paginating?) videos course
-        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        entries = list(result['entries'])
-        self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
-        self.assertEqual(len(entries), 25)
-        self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
-
     def test_youtube_mix(self):
         dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
-        entries = result['entries']
-        self.assertTrue(len(entries) >= 50)
+        dl.params['format'] = 'best'
+        ie = YoutubeTabIE(dl)
+        result = dl.extract_info('https://www.youtube.com/watch?v=tyITL_exICo&list=RDCLAK5uy_kLWIr9gv1XLlPbaDS965-Db4TrBoUTxQ8',
+                                 download=False, ie_key=ie.ie_key(), process=True)
+        entries = (result or {}).get('entries', [{'id': 'not_found', }])
+        self.assertTrue(len(entries) >= 25)
         original_video = entries[0]
-        self.assertEqual(original_video['id'], 'OQpdSVF_k_w')
+        self.assertEqual(original_video['id'], 'tyITL_exICo')

-    def test_youtube_toptracks(self):
-        print('Skipping: The playlist page gives error 500')
-        return
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=MCUS')
-        entries = result['entries']
-        self.assertEqual(len(entries), 100)
-
-    def test_youtube_flat_playlist_titles(self):
+    def test_youtube_flat_playlist_extraction(self):
         dl = FakeYDL()
         dl.params['extract_flat'] = True
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PL-KKIb8rvtMSrAO9YFbeM6UQrAqoFTUWv')
+        ie = YoutubeTabIE(dl)
+        result = ie.extract('https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc')
         self.assertIsPlaylist(result)
-        for entry in result['entries']:
-            self.assertTrue(entry.get('title'))
+        entries = list(result['entries'])
+        self.assertTrue(len(entries) == 1)
+        video = entries[0]
+        self.assertEqual(video['_type'], 'url')
+        self.assertEqual(video['ie_key'], 'Youtube')
+        self.assertEqual(video['id'], 'BaW_jenozKc')
+        self.assertEqual(video['url'], 'BaW_jenozKc')
+        self.assertEqual(video['title'], 'youtube-dl test video "\'/\\ä↭𝕐')
+        self.assertEqual(video['duration'], 10)
+        self.assertEqual(video['uploader'], 'Philipp Hagemeister')


 if __name__ == '__main__':
test/test_youtube_misc.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.extractor import YoutubeIE
+
+
+class TestYoutubeMisc(unittest.TestCase):
+    def test_youtube_extract(self):
+        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
+        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
+        assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
+        assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
+        assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
+        assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
+        assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
+
+
+if __name__ == '__main__':
+    unittest.main()
@@ -14,70 +14,93 @@ import string

 from test.helper import FakeYDL
 from youtube_dl.extractor import YoutubeIE
+from youtube_dl.jsinterp import JSInterpreter
 from youtube_dl.compat import compat_str, compat_urlretrieve


-_TESTS = [
+_SIG_TESTS = [
     (
         'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
-        'js',
         86,
         '>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
-        'js',
         85,
         '3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
-        'js',
         90,
         ']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
-        'js',
         84,
         'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
-        'js',
         '2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
         'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
-        'js',
         84,
         '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
-        'js',
         83,
         '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
-        'js',
         '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
         '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
-        'js',
         '312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
         '112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
     )
 ]

+_NSIG_TESTS = [
+    (
+        'https://www.youtube.com/s/player/9216d1f7/player_ias.vflset/en_US/base.js',
+        'SLp9F5bwjAdhE9F-', 'gWnb9IK2DJ8Q1w',
+    ),
+    (
+        'https://www.youtube.com/s/player/f8cb7a3b/player_ias.vflset/en_US/base.js',
+        'oBo2h5euWy6osrUt', 'ivXHpm7qJjJN',
+    ),
+    (
+        'https://www.youtube.com/s/player/2dfe380c/player_ias.vflset/en_US/base.js',
+        'oBo2h5euWy6osrUt', '3DIBbn3qdQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/f1ca6900/player_ias.vflset/en_US/base.js',
+        'cu3wyu6LQn2hse', 'jvxetvmlI9AN9Q',
+    ),
+    (
+        'https://www.youtube.com/s/player/8040e515/player_ias.vflset/en_US/base.js',
+        'wvOFaY-yjgDuIEg5', 'HkfBFDHmgw4rsw',
+    ),
+    (
+        'https://www.youtube.com/s/player/e06dea74/player_ias.vflset/en_US/base.js',
+        'AiuodmaDDYw8d3y4bf', 'ankd8eza2T6Qmw',
+    ),
+]
+

 class TestPlayerInfo(unittest.TestCase):
     def test_youtube_extract_player_info(self):
         PLAYER_URLS = (
             ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
+            ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'),
+            ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
+            ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
+            ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
             # obsolete
             ('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
             ('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
@@ -86,59 +109,70 @@ class TestPlayerInfo(unittest.TestCase):
             ('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 'vflaxXRn1'),
             ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
             ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
-            ('http://s.ytimg.com/yt/swfbin/watch_as3-vflrEm9Nq.swf', 'vflrEm9Nq'),
-            ('https://s.ytimg.com/yts/swfbin/player-vflenCdZL/watch_as3.swf', 'vflenCdZL'),
         )
         for player_url, expected_player_id in PLAYER_URLS:
-            expected_player_type = player_url.split('.')[-1]
-            player_type, player_id = YoutubeIE._extract_player_info(player_url)
-            self.assertEqual(player_type, expected_player_type)
+            player_id = YoutubeIE._extract_player_info(player_url)
             self.assertEqual(player_id, expected_player_id)


 class TestSignature(unittest.TestCase):
     def setUp(self):
         TEST_DIR = os.path.dirname(os.path.abspath(__file__))
-        self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
+        self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata/sigs')
         if not os.path.exists(self.TESTDATA_DIR):
             os.mkdir(self.TESTDATA_DIR)

+    def tearDown(self):
+        try:
+            for f in os.listdir(self.TESTDATA_DIR):
+                os.remove(f)
+        except OSError:
+            pass
+

-def make_tfunc(url, stype, sig_input, expected_sig):
-    m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
-    assert m, '%r should follow URL format' % url
-    test_id = m.group(1)
-
-    def test_func(self):
-        basename = 'player-%s.%s' % (test_id, stype)
-        fn = os.path.join(self.TESTDATA_DIR, basename)
-
-        if not os.path.exists(fn):
-            compat_urlretrieve(url, fn)
-
-        ydl = FakeYDL()
-        ie = YoutubeIE(ydl)
-        if stype == 'js':
-            with io.open(fn, encoding='utf-8') as testf:
-                jscode = testf.read()
-            func = ie._parse_sig_js(jscode)
-        else:
-            assert stype == 'swf'
-            with open(fn, 'rb') as testf:
-                swfcode = testf.read()
-            func = ie._parse_sig_swf(swfcode)
-        src_sig = (
-            compat_str(string.printable[:sig_input])
-            if isinstance(sig_input, int) else sig_input)
-        got_sig = func(src_sig)
-        self.assertEqual(got_sig, expected_sig)
-
-    test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
-    setattr(TestSignature, test_func.__name__, test_func)
-
-
-for test_spec in _TESTS:
-    make_tfunc(*test_spec)
+def t_factory(name, sig_func, url_pattern):
+    def make_tfunc(url, sig_input, expected_sig):
+        m = url_pattern.match(url)
+        assert m, '%r should follow URL format' % url
+        test_id = m.group('id')
+
+        def test_func(self):
+            basename = 'player-{0}-{1}.js'.format(name, test_id)
+            fn = os.path.join(self.TESTDATA_DIR, basename)
+
+            if not os.path.exists(fn):
+                compat_urlretrieve(url, fn)
+            with io.open(fn, encoding='utf-8') as testf:
+                jscode = testf.read()
+            self.assertEqual(sig_func(jscode, sig_input), expected_sig)
+
+        test_func.__name__ = str('test_{0}_js_{1}'.format(name, test_id))
+        setattr(TestSignature, test_func.__name__, test_func)
+    return make_tfunc
+
+
+def signature(jscode, sig_input):
+    func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
+    src_sig = (
+        compat_str(string.printable[:sig_input])
+        if isinstance(sig_input, int) else sig_input)
+    return func(src_sig)
+
+
+def n_sig(jscode, sig_input):
+    funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode)
+    return JSInterpreter(jscode).call_function(funcname, sig_input)
+
+
+make_sig_test = t_factory(
+    'signature', signature, re.compile(r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$'))
+for test_spec in _SIG_TESTS:
+    make_sig_test(*test_spec)
+
+make_nsig_test = t_factory(
+    'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$'))
+for test_spec in _NSIG_TESTS:
+    make_nsig_test(*test_spec)


 if __name__ == '__main__':
@@ -73,6 +73,7 @@ from .utils import (
     PostProcessingError,
     preferredencoding,
     prepend_extension,
+    process_communicate_or_kill,
     register_socks_protocols,
     render_table,
     replace_extension,
@@ -163,6 +164,7 @@ class YoutubeDL(object):
     simulate: Do not download the video files.
     format: Video format code. See options.py for more information.
     outtmpl: Template for output names.
+    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
     restrictfilenames: Do not allow "&" and spaces in file names
     ignoreerrors: Do not stop on download errors.
     force_generic_extractor: Force downloader to use the generic extractor
@@ -338,6 +340,8 @@ class YoutubeDL(object):
     _pps = []
     _download_retcode = None
     _num_downloads = None
+    _playlist_level = 0
+    _playlist_urls = set()
     _screen_file = None

     def __init__(self, params=None, auto_init=True):
@@ -656,7 +660,7 @@ class YoutubeDL(object):
         template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                              for k, v in template_dict.items()
                              if v is not None and not isinstance(v, (list, tuple, dict)))
-        template_dict = collections.defaultdict(lambda: 'NA', template_dict)
+        template_dict = collections.defaultdict(lambda: self.params.get('outtmpl_na_placeholder', 'NA'), template_dict)

         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)

@@ -676,8 +680,8 @@ class YoutubeDL(object):

         # Missing numeric fields used together with integer presentation types
         # in format specification will break the argument substitution since
-        # string 'NA' is returned for missing fields. We will patch output
-        # template for missing fields to meet string presentation type.
+        # string NA placeholder is returned for missing fields. We will patch
+        # output template for missing fields to meet string presentation type.
         for numeric_field in self._NUMERIC_FIELDS:
             if numeric_field not in template_dict:
                 # As of [1] format syntax is:
@@ -770,11 +774,20 @@ class YoutubeDL(object):

     def extract_info(self, url, download=True, ie_key=None, extra_info={},
                      process=True, force_generic_extractor=False):
-        '''
-        Returns a list with a dictionary for each video we find.
-        If 'download', also downloads the videos.
-        extra_info is a dict containing the extra values to add to each result
-        '''
+        """
+        Return a list with a dictionary for each video extracted.
+
+        Arguments:
+        url -- URL to extract
+
+        Keyword arguments:
+        download -- whether to download videos during extraction
+        ie_key -- extractor key hint
+        extra_info -- dictionary containing the extra values to add to each result
+        process -- whether to resolve all unresolved references (URLs, playlist items),
+                   must be True for download to work.
+        force_generic_extractor -- force using the generic extractor
+        """

         if not ie_key and force_generic_extractor:
             ie_key = 'Generic'
@@ -906,115 +919,23 @@ class YoutubeDL(object):
|
|||||||
return self.process_ie_result(
|
return self.process_ie_result(
|
||||||
new_result, download=download, extra_info=extra_info)
|
new_result, download=download, extra_info=extra_info)
|
||||||
elif result_type in ('playlist', 'multi_video'):
|
elif result_type in ('playlist', 'multi_video'):
|
||||||
# We process each entry in the playlist
|
# Protect from infinite recursion due to recursively nested playlists
|
||||||
playlist = ie_result.get('title') or ie_result.get('id')
|
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
|
||||||
self.to_screen('[download] Downloading playlist: %s' % playlist)
|
webpage_url = ie_result['webpage_url']
|
||||||
|
if webpage_url in self._playlist_urls:
|
||||||
playlist_results = []
|
|
||||||
|
|
||||||
playliststart = self.params.get('playliststart', 1) - 1
|
|
||||||
playlistend = self.params.get('playlistend')
|
|
||||||
# For backwards compatibility, interpret -1 as whole list
|
|
||||||
if playlistend == -1:
|
|
||||||
playlistend = None
|
|
||||||
|
|
||||||
playlistitems_str = self.params.get('playlist_items')
|
|
||||||
playlistitems = None
|
|
||||||
if playlistitems_str is not None:
|
|
||||||
def iter_playlistitems(format):
|
|
||||||
for string_segment in format.split(','):
|
|
||||||
if '-' in string_segment:
|
|
||||||
start, end = string_segment.split('-')
|
|
||||||
for item in range(int(start), int(end) + 1):
|
|
||||||
yield int(item)
|
|
||||||
else:
|
|
||||||
yield int(string_segment)
|
|
||||||
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
|
|
||||||
|
|
||||||
ie_entries = ie_result['entries']
|
|
||||||
|
|
||||||
def make_playlistitems_entries(list_ie_entries):
|
|
||||||
num_entries = len(list_ie_entries)
|
|
||||||
return [
|
|
||||||
list_ie_entries[i - 1] for i in playlistitems
|
|
||||||
if -num_entries <= i - 1 < num_entries]
|
|
||||||
|
|
||||||
def report_download(num_entries):
|
|
||||||
self.to_screen(
|
self.to_screen(
|
||||||
'[%s] playlist %s: Downloading %d videos' %
|
'[download] Skipping already downloaded playlist: %s'
|
||||||
(ie_result['extractor'], playlist, num_entries))
|
% ie_result.get('title') or ie_result.get('id'))
|
||||||
|
return
|
||||||
|
|
||||||
if isinstance(ie_entries, list):
|
self._playlist_level += 1
|
||||||
n_all_entries = len(ie_entries)
|
self._playlist_urls.add(webpage_url)
|
||||||
if playlistitems:
|
try:
|
||||||
entries = make_playlistitems_entries(ie_entries)
|
return self.__process_playlist(ie_result, download)
|
||||||
else:
|
finally:
|
||||||
entries = ie_entries[playliststart:playlistend]
|
self._playlist_level -= 1
|
||||||
n_entries = len(entries)
|
if not self._playlist_level:
|
||||||
self.to_screen(
|
self._playlist_urls.clear()
|
||||||
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
|
|
||||||
(ie_result['extractor'], playlist, n_all_entries, n_entries))
|
|
||||||
elif isinstance(ie_entries, PagedList):
|
|
||||||
if playlistitems:
|
|
||||||
entries = []
|
|
||||||
for item in playlistitems:
|
|
||||||
entries.extend(ie_entries.getslice(
|
|
||||||
item - 1, item
|
|
||||||
))
|
|
||||||
else:
|
|
||||||
entries = ie_entries.getslice(
|
|
||||||
playliststart, playlistend)
|
|
||||||
n_entries = len(entries)
|
|
||||||
report_download(n_entries)
|
|
||||||
else: # iterable
|
|
||||||
if playlistitems:
|
|
||||||
entries = make_playlistitems_entries(list(itertools.islice(
|
|
||||||
ie_entries, 0, max(playlistitems))))
|
|
||||||
else:
|
|
||||||
entries = list(itertools.islice(
|
|
||||||
ie_entries, playliststart, playlistend))
|
|
||||||
n_entries = len(entries)
|
|
||||||
report_download(n_entries)
|
|
||||||
|
|
||||||
if self.params.get('playlistreverse', False):
|
|
||||||
entries = entries[::-1]
|
|
||||||
|
|
||||||
if self.params.get('playlistrandom', False):
|
|
||||||
random.shuffle(entries)
|
|
||||||
|
|
||||||
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
|
|
||||||
|
|
||||||
for i, entry in enumerate(entries, 1):
|
|
||||||
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
|
|
||||||
# This __x_forwarded_for_ip thing is a bit ugly but requires
|
|
||||||
# minimal changes
|
|
||||||
if x_forwarded_for:
|
|
||||||
entry['__x_forwarded_for_ip'] = x_forwarded_for
|
|
||||||
extra = {
|
|
||||||
'n_entries': n_entries,
|
|
||||||
'playlist': playlist,
|
|
||||||
'playlist_id': ie_result.get('id'),
|
|
||||||
'playlist_title': ie_result.get('title'),
|
|
||||||
'playlist_uploader': ie_result.get('uploader'),
|
|
||||||
'playlist_uploader_id': ie_result.get('uploader_id'),
|
|
||||||
'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
|
|
||||||
'extractor': ie_result['extractor'],
|
|
||||||
'webpage_url': ie_result['webpage_url'],
|
|
||||||
'webpage_url_basename': url_basename(ie_result['webpage_url']),
|
|
||||||
'extractor_key': ie_result['extractor_key'],
|
|
||||||
}
|
|
||||||
|
|
||||||
reason = self._match_entry(entry, incomplete=True)
|
|
||||||
if reason is not None:
|
|
||||||
self.to_screen('[download] ' + reason)
|
|
||||||
continue
|
|
||||||
|
|
||||||
entry_result = self.__process_iterable_entry(entry, download, extra)
|
|
||||||
# TODO: skip failed (empty) entries?
|
|
||||||
playlist_results.append(entry_result)
|
|
||||||
ie_result['entries'] = playlist_results
|
|
||||||
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
|
|
||||||
return ie_result
|
|
||||||
elif result_type == 'compat_list':
|
elif result_type == 'compat_list':
|
||||||
self.report_warning(
|
self.report_warning(
|
||||||
'Extractor %s returned a compat_list result. '
|
'Extractor %s returned a compat_list result. '
|
||||||
@@ -1039,6 +960,118 @@ class YoutubeDL(object):
|
|||||||
else:
|
else:
|
||||||
raise Exception('Invalid result type: %s' % result_type)
|
raise Exception('Invalid result type: %s' % result_type)
|
||||||
|
|
||||||
|
def __process_playlist(self, ie_result, download):
|
||||||
|
# We process each entry in the playlist
|
||||||
|
playlist = ie_result.get('title') or ie_result.get('id')
|
||||||
|
|
||||||
|
self.to_screen('[download] Downloading playlist: %s' % playlist)
|
||||||
|
|
||||||
|
playlist_results = []
|
||||||
|
|
||||||
|
playliststart = self.params.get('playliststart', 1) - 1
|
||||||
|
playlistend = self.params.get('playlistend')
|
||||||
|
# For backwards compatibility, interpret -1 as whole list
|
||||||
|
if playlistend == -1:
|
||||||
|
playlistend = None
|
||||||
|
|
||||||
|
playlistitems_str = self.params.get('playlist_items')
|
||||||
|
playlistitems = None
|
||||||
|
if playlistitems_str is not None:
|
||||||
|
def iter_playlistitems(format):
|
||||||
|
for string_segment in format.split(','):
|
||||||
|
if '-' in string_segment:
|
||||||
|
start, end = string_segment.split('-')
|
||||||
|
for item in range(int(start), int(end) + 1):
|
||||||
|
yield int(item)
|
||||||
|
else:
|
||||||
|
yield int(string_segment)
|
||||||
|
+        playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
+
+        ie_entries = ie_result['entries']
+
+        def make_playlistitems_entries(list_ie_entries):
+            num_entries = len(list_ie_entries)
+            return [
+                list_ie_entries[i - 1] for i in playlistitems
+                if -num_entries <= i - 1 < num_entries]
+
+        def report_download(num_entries):
+            self.to_screen(
+                '[%s] playlist %s: Downloading %d videos' %
+                (ie_result['extractor'], playlist, num_entries))
+
+        if isinstance(ie_entries, list):
+            n_all_entries = len(ie_entries)
+            if playlistitems:
+                entries = make_playlistitems_entries(ie_entries)
+            else:
+                entries = ie_entries[playliststart:playlistend]
+            n_entries = len(entries)
+            self.to_screen(
+                '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
+                (ie_result['extractor'], playlist, n_all_entries, n_entries))
+        elif isinstance(ie_entries, PagedList):
+            if playlistitems:
+                entries = []
+                for item in playlistitems:
+                    entries.extend(ie_entries.getslice(
+                        item - 1, item
+                    ))
+            else:
+                entries = ie_entries.getslice(
+                    playliststart, playlistend)
+            n_entries = len(entries)
+            report_download(n_entries)
+        else:  # iterable
+            if playlistitems:
+                entries = make_playlistitems_entries(list(itertools.islice(
+                    ie_entries, 0, max(playlistitems))))
+            else:
+                entries = list(itertools.islice(
+                    ie_entries, playliststart, playlistend))
+            n_entries = len(entries)
+            report_download(n_entries)
+
+        if self.params.get('playlistreverse', False):
+            entries = entries[::-1]
+
+        if self.params.get('playlistrandom', False):
+            random.shuffle(entries)
+
+        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
+
+        for i, entry in enumerate(entries, 1):
+            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
+            # This __x_forwarded_for_ip thing is a bit ugly but requires
+            # minimal changes
+            if x_forwarded_for:
+                entry['__x_forwarded_for_ip'] = x_forwarded_for
+            extra = {
+                'n_entries': n_entries,
+                'playlist': playlist,
+                'playlist_id': ie_result.get('id'),
+                'playlist_title': ie_result.get('title'),
+                'playlist_uploader': ie_result.get('uploader'),
+                'playlist_uploader_id': ie_result.get('uploader_id'),
+                'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
+                'extractor': ie_result['extractor'],
+                'webpage_url': ie_result['webpage_url'],
+                'webpage_url_basename': url_basename(ie_result['webpage_url']),
+                'extractor_key': ie_result['extractor_key'],
+            }
+
+            reason = self._match_entry(entry, incomplete=True)
+            if reason is not None:
+                self.to_screen('[download] ' + reason)
+                continue
+
+            entry_result = self.__process_iterable_entry(entry, download, extra)
+            # TODO: skip failed (empty) entries?
+            playlist_results.append(entry_result)
+        ie_result['entries'] = playlist_results
+        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
+        return ie_result

     @__handle_extraction_exceptions
     def __process_iterable_entry(self, entry, download, extra_info):
         return self.process_ie_result(
@@ -1083,7 +1116,7 @@ class YoutubeDL(object):
             '*=': lambda attr, value: value in attr,
         }
         str_operator_rex = re.compile(r'''(?x)
-            \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
+            \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id|language)
             \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
             \s*(?P<value>[a-zA-Z0-9._-]+)
             \s*$
@@ -1226,6 +1259,8 @@ class YoutubeDL(object):
                 group = _parse_format_selection(tokens, inside_group=True)
                 current_selector = FormatSelector(GROUP, group, [])
             elif string == '+':
+                if inside_merge:
+                    raise syntax_error('Unexpected "+"', start)
                 video_selector = current_selector
                 audio_selector = _parse_format_selection(tokens, inside_merge=True)
                 if not video_selector or not audio_selector:
@@ -1486,14 +1521,18 @@ class YoutubeDL(object):
         if 'display_id' not in info_dict and 'id' in info_dict:
             info_dict['display_id'] = info_dict['id']

-        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
-            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
-            # see http://bugs.python.org/issue1646728)
-            try:
-                upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
-                info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
-            except (ValueError, OverflowError, OSError):
-                pass
+        for ts_key, date_key in (
+                ('timestamp', 'upload_date'),
+                ('release_timestamp', 'release_date'),
+        ):
+            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+                # see http://bugs.python.org/issue1646728)
+                try:
+                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
+                except (ValueError, OverflowError, OSError):
+                    pass

         # Auto generate title fields corresponding to the *_number fields when missing
         # in order to always have clean titles. This is very common for TV series.
@@ -1777,6 +1816,8 @@ class YoutubeDL(object):
                     os.makedirs(dn)
                 return True
             except (OSError, IOError) as err:
+                if isinstance(err, OSError) and err.errno == errno.EEXIST:
+                    return True
                 self.report_error('unable to create directory ' + error_to_compat_str(err))
                 return False

@@ -1866,8 +1907,17 @@ class YoutubeDL(object):

         if not self.params.get('skip_download', False):
             try:
+                def checked_get_suitable_downloader(info_dict, params):
+                    ed_args = params.get('external_downloader_args')
+                    dler = get_suitable_downloader(info_dict, params)
+                    if ed_args and not params.get('external_downloader_args'):
+                        # external_downloader_args was cleared because external_downloader was rejected
+                        self.report_warning('Requested external downloader cannot be used: '
+                                            'ignoring --external-downloader-args.')
+                    return dler
+
                 def dl(name, info):
-                    fd = get_suitable_downloader(info, self.params)(self, self.params)
+                    fd = checked_get_suitable_downloader(info, self.params)(self, self.params)
                     for ph in self._progress_hooks:
                         fd.add_progress_hook(ph)
                     if self.params.get('verbose'):
@@ -2274,7 +2324,7 @@ class YoutubeDL(object):
                 ['git', 'rev-parse', '--short', 'HEAD'],
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                 cwd=os.path.dirname(os.path.abspath(__file__)))
-            out, err = sp.communicate()
+            out, err = process_communicate_or_kill(sp)
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
                 self._write_string('[debug] Git HEAD: ' + out + '\n')
@@ -340,6 +340,7 @@ def _real_main(argv=None):
         'format': opts.format,
         'listformats': opts.listformats,
         'outtmpl': outtmpl,
+        'outtmpl_na_placeholder': opts.outtmpl_na_placeholder,
         'autonumber_size': opts.autonumber_size,
         'autonumber_start': opts.autonumber_start,
         'restrictfilenames': opts.restrictfilenames,
@@ -303,7 +303,7 @@ def xor(data1, data2):


 def rijndael_mul(a, b):
-    if(a == 0 or b == 0):
+    if (a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]

@@ -21,6 +21,10 @@ import subprocess
 import sys
 import xml.etree.ElementTree

+try:
+    import collections.abc as compat_collections_abc
+except ImportError:
+    import collections as compat_collections_abc

 try:
     import urllib.request as compat_urllib_request
@@ -73,6 +77,15 @@ try:
 except ImportError:  # Python 2
     import Cookie as compat_cookies

+if sys.version_info[0] == 2:
+    class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
+        def load(self, rawdata):
+            if isinstance(rawdata, compat_str):
+                rawdata = str(rawdata)
+            return super(compat_cookies_SimpleCookie, self).load(rawdata)
+else:
+    compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
+
 try:
     import html.entities as compat_html_entities
 except ImportError:  # Python 2
@@ -2877,6 +2890,7 @@ else:
     _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

     def compat_get_terminal_size(fallback=(80, 24)):
+        from .utils import process_communicate_or_kill
         columns = compat_getenv('COLUMNS')
         if columns:
             columns = int(columns)
@@ -2893,7 +2907,7 @@ else:
                 sp = subprocess.Popen(
                     ['stty', 'size'],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                out, err = sp.communicate()
+                out, err = process_communicate_or_kill(sp)
                 _lines, _columns = map(int, out.split())
             except Exception:
                 _columns, _lines = _terminal_size(*fallback)
@@ -2953,6 +2967,25 @@ else:
     compat_Struct = struct.Struct


+# compat_map/filter() returning an iterator, supposedly the
+# same versioning as for zip below
+try:
+    from future_builtins import map as compat_map
+except ImportError:
+    try:
+        from itertools import imap as compat_map
+    except ImportError:
+        compat_map = map
+
+try:
+    from future_builtins import filter as compat_filter
+except ImportError:
+    try:
+        from itertools import ifilter as compat_filter
+    except ImportError:
+        compat_filter = filter
+
+
 try:
     from future_builtins import zip as compat_zip
 except ImportError:  # not 2.6+ or is 3.x
@@ -2997,14 +3030,17 @@ __all__ = [
     'compat_b64decode',
     'compat_basestring',
     'compat_chr',
+    'compat_collections_abc',
     'compat_cookiejar',
     'compat_cookiejar_Cookie',
     'compat_cookies',
+    'compat_cookies_SimpleCookie',
     'compat_ctypes_WINFUNCTYPE',
     'compat_etree_Element',
     'compat_etree_fromstring',
     'compat_etree_register_namespace',
     'compat_expanduser',
+    'compat_filter',
     'compat_get_terminal_size',
     'compat_getenv',
     'compat_getpass',
@@ -3016,6 +3052,7 @@ __all__ = [
     'compat_integer_types',
     'compat_itertools_count',
     'compat_kwargs',
+    'compat_map',
     'compat_numeric_types',
     'compat_ord',
     'compat_os_name',
@@ -1,22 +1,31 @@
 from __future__ import unicode_literals

+from ..utils import (
+    determine_protocol,
+)
+
+
+def get_suitable_downloader(info_dict, params={}):
+    info_dict['protocol'] = determine_protocol(info_dict)
+    info_copy = info_dict.copy()
+    return _get_suitable_downloader(info_copy, params)
+
+
+# Some of these require get_suitable_downloader
 from .common import FileDownloader
+from .dash import DashSegmentsFD
 from .f4m import F4mFD
 from .hls import HlsFD
 from .http import HttpFD
 from .rtmp import RtmpFD
-from .dash import DashSegmentsFD
 from .rtsp import RtspFD
 from .ism import IsmFD
+from .niconico import NiconicoDmcFD
 from .external import (
     get_external_downloader,
     FFmpegFD,
 )

-from ..utils import (
-    determine_protocol,
-)
-
 PROTOCOL_MAP = {
     'rtmp': RtmpFD,
     'm3u8_native': HlsFD,
@@ -26,13 +35,12 @@ PROTOCOL_MAP = {
     'f4m': F4mFD,
     'http_dash_segments': DashSegmentsFD,
     'ism': IsmFD,
+    'niconico_dmc': NiconicoDmcFD,
 }


-def get_suitable_downloader(info_dict, params={}):
+def _get_suitable_downloader(info_dict, params={}):
     """Get the downloader class that can handle the info dict."""
-    protocol = determine_protocol(info_dict)
-    info_dict['protocol'] = protocol

     # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
     #     return FFmpegFD
@@ -42,7 +50,11 @@ def get_suitable_downloader(info_dict, params={}):
         ed = get_external_downloader(external_downloader)
         if ed.can_download(info_dict):
             return ed
+        # Avoid using unwanted args since external_downloader was rejected
+        if params.get('external_downloader_args'):
+            params['external_downloader_args'] = None

+    protocol = info_dict['protocol']
     if protocol.startswith('m3u8') and info_dict.get('is_live'):
         return FFmpegFD

@@ -22,6 +22,7 @@ from ..utils import (
     handle_youtubedl_headers,
     check_executable,
     is_outdated_version,
+    process_communicate_or_kill,
 )


@@ -104,7 +105,7 @@ class ExternalFD(FileDownloader):

         p = subprocess.Popen(
             cmd, stderr=subprocess.PIPE)
-        _, stderr = p.communicate()
+        _, stderr = process_communicate_or_kill(p)
         if p.returncode != 0:
             self.to_stderr(stderr.decode('utf-8', 'replace'))
         return p.returncode
@@ -141,7 +142,7 @@ class CurlFD(ExternalFD):

         # curl writes the progress to stderr so don't capture it.
         p = subprocess.Popen(cmd)
-        p.communicate()
+        process_communicate_or_kill(p)
         return p.returncode


@@ -336,14 +337,17 @@ class FFmpegFD(ExternalFD):
         proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
         try:
             retval = proc.wait()
-        except KeyboardInterrupt:
-            # subprocces.run would send the SIGKILL signal to ffmpeg and the
+        except BaseException as e:
+            # subprocess.run would send the SIGKILL signal to ffmpeg and the
             # mp4 file couldn't be played, but if we ask ffmpeg to quit it
             # produces a file that is playable (this is mostly useful for live
             # streams). Note that Windows is not affected and produces playable
             # files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
-            if sys.platform != 'win32':
-                proc.communicate(b'q')
+            if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32':
+                process_communicate_or_kill(proc, b'q')
+            else:
+                proc.kill()
+                proc.wait()
             raise
         return retval

@@ -42,11 +42,13 @@ class HlsFD(FragmentFD):
             # no segments will definitely be appended to the end of the playlist.
             # r'#EXT-X-PLAYLIST-TYPE:EVENT',  # media segments may be appended to the end of
             #                                 # event media playlists [4]
+            r'#EXT-X-MAP:',  # media initialization [5]

             # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
             # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
             # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
             # 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
+            # 5. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.5
         )
         check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
         is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
@@ -170,8 +172,12 @@ class HlsFD(FragmentFD):
                     iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
                     decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
                         self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
-                    frag_content = AES.new(
-                        decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
+                    # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
+                    # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
+                    # not what it decrypts to.
+                    if not test:
+                        frag_content = AES.new(
+                            decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
                 self._append_fragment(ctx, frag_content)
                 # We only download the first fragment during the test
                 if test:
youtube_dl/downloader/niconico.py (new file, 66 lines)
@@ -0,0 +1,66 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+try:
+    import threading
+except ImportError:
+    threading = None
+
+from .common import FileDownloader
+from ..downloader import get_suitable_downloader
+from ..extractor.niconico import NiconicoIE
+from ..utils import sanitized_Request
+
+
+class NiconicoDmcFD(FileDownloader):
+    """ Downloading niconico douga from DMC with heartbeat """
+
+    FD_NAME = 'niconico_dmc'
+
+    def real_download(self, filename, info_dict):
+        self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
+
+        ie = NiconicoIE(self.ydl)
+        info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
+
+        fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)
+        for ph in self._progress_hooks:
+            fd.add_progress_hook(ph)
+
+        if not threading:
+            self.to_screen('[%s] Threading for Heartbeat not available' % self.FD_NAME)
+            return fd.real_download(filename, info_dict)
+
+        success = download_complete = False
+        timer = [None]
+        heartbeat_lock = threading.Lock()
+        heartbeat_url = heartbeat_info_dict['url']
+        heartbeat_data = heartbeat_info_dict['data'].encode()
+        heartbeat_interval = heartbeat_info_dict.get('interval', 30)
+
+        request = sanitized_Request(heartbeat_url, heartbeat_data)
+
+        def heartbeat():
+            try:
+                self.ydl.urlopen(request).read()
+            except Exception:
+                self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
+
+            with heartbeat_lock:
+                if not download_complete:
+                    timer[0] = threading.Timer(heartbeat_interval, heartbeat)
+                    timer[0].start()
+
+        heartbeat_info_dict['ping']()
+        self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
+        try:
+            heartbeat()
+            if type(fd).__name__ == 'HlsFD':
+                info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
+            success = fd.real_download(filename, info_dict)
+        finally:
+            if heartbeat_lock:
+                with heartbeat_lock:
+                    timer[0].cancel()
+                    download_complete = True
+        return success
@@ -89,11 +89,13 @@ class RtmpFD(FileDownloader):
                                 self.to_screen('')
                                 cursor_in_new_line = True
                             self.to_screen('[rtmpdump] ' + line)
-            finally:
+                if not cursor_in_new_line:
+                    self.to_screen('')
+                return proc.wait()
+            except BaseException:  # Including KeyboardInterrupt
+                proc.kill()
                 proc.wait()
-            if not cursor_in_new_line:
-                self.to_screen('')
-            return proc.returncode
+                raise

         url = info_dict['url']
         player_url = info_dict.get('player_url')
@@ -1,14 +1,15 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import calendar
 import re
-import time

 from .amp import AMPIE
 from .common import InfoExtractor
-from .youtube import YoutubeIE
-from ..compat import compat_urlparse
+from ..utils import (
+    parse_duration,
+    parse_iso8601,
+    try_get,
+)


 class AbcNewsVideoIE(AMPIE):
@@ -18,8 +19,8 @@ class AbcNewsVideoIE(AMPIE):
                     (?:
                         abcnews\.go\.com/
                             (?:
-                                [^/]+/video/(?P<display_id>[0-9a-z-]+)-|
-                                video/embed\?.*?\bid=
+                                (?:[^/]+/)*video/(?P<display_id>[0-9a-z-]+)-|
+                                video/(?:embed|itemfeed)\?.*?\bid=
                             )|
                         fivethirtyeight\.abcnews\.go\.com/video/embed/\d+/
                     )
@@ -36,6 +37,8 @@ class AbcNewsVideoIE(AMPIE):
             'description': 'George Stephanopoulos goes one-on-one with Iranian Foreign Minister Dr. Javad Zarif.',
             'duration': 180,
             'thumbnail': r're:^https?://.*\.jpg$',
+            'timestamp': 1380454200,
+            'upload_date': '20130929',
         },
         'params': {
             # m3u8 download
@@ -47,6 +50,12 @@ class AbcNewsVideoIE(AMPIE):
     }, {
         'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478',
         'only_matching': True,
+    }, {
+        'url': 'http://abcnews.go.com/video/itemfeed?id=46979033',
+        'only_matching': True,
+    }, {
+        'url': 'https://abcnews.go.com/GMA/News/video/history-christmas-story-67894761',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
@@ -67,28 +76,23 @@ class AbcNewsIE(InfoExtractor):
     _VALID_URL = r'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)'

     _TESTS = [{
-        'url': 'http://abcnews.go.com/Blotter/News/dramatic-video-rare-death-job-america/story?id=10498713#.UIhwosWHLjY',
+        # Youtube Embeds
+        'url': 'https://abcnews.go.com/Entertainment/peter-billingsley-child-actor-christmas-story-hollywood-power/story?id=51286501',
         'info_dict': {
-            'id': '10505354',
-            'ext': 'flv',
-            'display_id': 'dramatic-video-rare-death-job-america',
-            'title': 'Occupational Hazards',
-            'description': 'Nightline investigates the dangers that lurk at various jobs.',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'upload_date': '20100428',
-            'timestamp': 1272412800,
+            'id': '51286501',
+            'title': "Peter Billingsley: From child actor in 'A Christmas Story' to Hollywood power player",
+            'description': 'Billingsley went from a child actor to Hollywood power player.',
         },
-        'add_ie': ['AbcNewsVideo'],
+        'playlist_count': 5,
     }, {
         'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
         'info_dict': {
             'id': '38897857',
             'ext': 'mp4',
-            'display_id': 'justin-timberlake-performs-stop-feeling-eurovision-2016',
             'title': 'Justin Timberlake Drops Hints For Secret Single',
             'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.',
-            'upload_date': '20160515',
-            'timestamp': 1463329500,
+            'upload_date': '20160505',
+            'timestamp': 1462442280,
         },
         'params': {
             # m3u8 download
@@ -100,49 +104,55 @@ class AbcNewsIE(InfoExtractor):
     }, {
         'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
         'only_matching': True,
+    }, {
+        # inline.type == 'video'
+        'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('display_id')
-        video_id = mobj.group('id')
-
-        webpage = self._download_webpage(url, video_id)
-        video_url = self._search_regex(
-            r'window\.abcnvideo\.url\s*=\s*"([^"]+)"', webpage, 'video URL')
-        full_video_url = compat_urlparse.urljoin(url, video_url)
-
-        youtube_url = YoutubeIE._extract_url(webpage)
-
-        timestamp = None
-        date_str = self._html_search_regex(
-            r'<span[^>]+class="timestamp">([^<]+)</span>',
-            webpage, 'timestamp', fatal=False)
-        if date_str:
-            tz_offset = 0
-            if date_str.endswith(' ET'):  # Eastern Time
-                tz_offset = -5
-                date_str = date_str[:-3]
-            date_formats = ['%b. %d, %Y', '%b %d, %Y, %I:%M %p']
-            for date_format in date_formats:
-                try:
-                    timestamp = calendar.timegm(time.strptime(date_str.strip(), date_format))
-                except ValueError:
-                    continue
-            if timestamp is not None:
-                timestamp -= tz_offset * 3600
-
-        entry = {
-            '_type': 'url_transparent',
-            'ie_key': AbcNewsVideoIE.ie_key(),
-            'url': full_video_url,
-            'id': video_id,
-            'display_id': display_id,
-            'timestamp': timestamp,
-        }
-
-        if youtube_url:
-            entries = [entry, self.url_result(youtube_url, ie=YoutubeIE.ie_key())]
-            return self.playlist_result(entries)
-
-        return entry
+        story_id = self._match_id(url)
+        webpage = self._download_webpage(url, story_id)
+        story = self._parse_json(self._search_regex(
+            r"window\['__abcnews__'\]\s*=\s*({.+?});",
+            webpage, 'data'), story_id)['page']['content']['story']['everscroll'][0]
+        article_contents = story.get('articleContents') or {}
+
+        def entries():
+            featured_video = story.get('featuredVideo') or {}
+            feed = try_get(featured_video, lambda x: x['video']['feed'])
+            if feed:
+                yield {
+                    '_type': 'url',
+                    'id': featured_video.get('id'),
+                    'title': featured_video.get('name'),
+                    'url': feed,
+                    'thumbnail': featured_video.get('images'),
+                    'description': featured_video.get('description'),
+                    'timestamp': parse_iso8601(featured_video.get('uploadDate')),
+                    'duration': parse_duration(featured_video.get('duration')),
+                    'ie_key': AbcNewsVideoIE.ie_key(),
+                }
+
+            for inline in (article_contents.get('inlines') or []):
+                inline_type = inline.get('type')
+                if inline_type == 'iframe':
+                    iframe_url = try_get(inline, lambda x: x['attrs']['src'])
+                    if iframe_url:
+                        yield self.url_result(iframe_url)
+                elif inline_type == 'video':
+                    video_id = inline.get('id')
+                    if video_id:
+                        yield {
+                            '_type': 'url',
+                            'id': video_id,
+                            'url': 'http://abcnews.go.com/video/embed?id=' + video_id,
+                            'thumbnail': inline.get('imgSrc') or inline.get('imgDefault'),
+                            'description': inline.get('description'),
+                            'duration': parse_duration(inline.get('duration')),
+                            'ie_key': AbcNewsVideoIE.ie_key(),
+                        }
+
+        return self.playlist_result(
+            entries(), story_id, article_contents.get('headline'),
+            article_contents.get('subHead'))
@@ -2,21 +2,48 @@
 from __future__ import unicode_literals

 import re
-import functools

 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     clean_html,
-    float_or_none,
+    clean_podcast_url,
     int_or_none,
-    try_get,
-    unified_timestamp,
-    OnDemandPagedList,
+    parse_iso8601,
 )


-class ACastIE(InfoExtractor):
+class ACastBaseIE(InfoExtractor):
+    def _extract_episode(self, episode, show_info):
+        title = episode['title']
+        info = {
+            'id': episode['id'],
+            'display_id': episode.get('episodeUrl'),
+            'url': clean_podcast_url(episode['url']),
+            'title': title,
+            'description': clean_html(episode.get('description') or episode.get('summary')),
+            'thumbnail': episode.get('image'),
+            'timestamp': parse_iso8601(episode.get('publishDate')),
+            'duration': int_or_none(episode.get('duration')),
+            'filesize': int_or_none(episode.get('contentLength')),
+            'season_number': int_or_none(episode.get('season')),
+            'episode': title,
+            'episode_number': int_or_none(episode.get('episode')),
+        }
+        info.update(show_info)
+        return info
+
+    def _extract_show_info(self, show):
+        return {
+            'creator': show.get('author'),
+            'series': show.get('title'),
+        }
+
+    def _call_api(self, path, video_id, query=None):
+        return self._download_json(
+            'https://feeder.acast.com/api/v1/shows/' + path, video_id, query=query)
+
+
+class ACastIE(ACastBaseIE):
     IE_NAME = 'acast'
     _VALID_URL = r'''(?x)
                     https?://
@@ -28,15 +55,15 @@ class ACastIE(InfoExtractor):
     '''
     _TESTS = [{
         'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
-        'md5': '16d936099ec5ca2d5869e3a813ee8dc4',
+        'md5': 'f5598f3ad1e4776fed12ec1407153e4b',
         'info_dict': {
             'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
             'ext': 'mp3',
             'title': '2. Raggarmordet - Röster ur det förflutna',
-            'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
+            'description': 'md5:a992ae67f4d98f1c0141598f7bebbf67',
             'timestamp': 1477346700,
             'upload_date': '20161024',
-            'duration': 2766.602563,
+            'duration': 2766,
             'creator': 'Anton Berg & Martin Johnson',
             'series': 'Spår',
             'episode': '2. Raggarmordet - Röster ur det förflutna',
@@ -45,7 +72,7 @@ class ACastIE(InfoExtractor):
         'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
         'only_matching': True,
     }, {
-        'url': 'https://play.acast.com/s/rattegangspodden/s04e09-styckmordet-i-helenelund-del-22',
+        'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2',
         'only_matching': True,
     }, {
         'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9',
@@ -54,40 +81,14 @@ class ACastIE(InfoExtractor):

     def _real_extract(self, url):
         channel, display_id = re.match(self._VALID_URL, url).groups()
-        s = self._download_json(
-            'https://feeder.acast.com/api/v1/shows/%s/episodes/%s' % (channel, display_id),
-            display_id)
-        media_url = s['url']
-        if re.search(r'[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}', display_id):
-            episode_url = s.get('episodeUrl')
-            if episode_url:
-                display_id = episode_url
-            else:
-                channel, display_id = re.match(self._VALID_URL, s['link']).groups()
-        cast_data = self._download_json(
-            'https://play-api.acast.com/splash/%s/%s' % (channel, display_id),
-            display_id)['result']
-        e = cast_data['episode']
-        title = e.get('name') or s['title']
-        return {
-            'id': compat_str(e['id']),
-            'display_id': display_id,
-            'url': media_url,
-            'title': title,
-            'description': e.get('summary') or clean_html(e.get('description') or s.get('description')),
-            'thumbnail': e.get('image'),
-            'timestamp': unified_timestamp(e.get('publishingDate') or s.get('publishDate')),
-            'duration': float_or_none(e.get('duration') or s.get('duration')),
-            'filesize': int_or_none(e.get('contentLength')),
-            'creator': try_get(cast_data, lambda x: x['show']['author'], compat_str),
-            'series': try_get(cast_data, lambda x: x['show']['name'], compat_str),
-            'season_number': int_or_none(e.get('seasonNumber')),
-            'episode': title,
-            'episode_number': int_or_none(e.get('episodeNumber')),
-        }
+        episode = self._call_api(
+            '%s/episodes/%s' % (channel, display_id),
+            display_id, {'showInfo': 'true'})
+        return self._extract_episode(
+            episode, self._extract_show_info(episode.get('show') or {}))


-class ACastChannelIE(InfoExtractor):
+class ACastChannelIE(ACastBaseIE):
     IE_NAME = 'acast:channel'
     _VALID_URL = r'''(?x)
                     https?://
@@ -102,34 +103,24 @@ class ACastChannelIE(InfoExtractor):
         'info_dict': {
             'id': '4efc5294-5385-4847-98bd-519799ce5786',
             'title': 'Today in Focus',
-            'description': 'md5:9ba5564de5ce897faeb12963f4537a64',
+            'description': 'md5:c09ce28c91002ce4ffce71d6504abaae',
         },
-        'playlist_mincount': 35,
+        'playlist_mincount': 200,
     }, {
         'url': 'http://play.acast.com/s/ft-banking-weekly',
         'only_matching': True,
     }]
-    _API_BASE_URL = 'https://play.acast.com/api/'
-    _PAGE_SIZE = 10

     @classmethod
     def suitable(cls, url):
         return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)

-    def _fetch_page(self, channel_slug, page):
-        casts = self._download_json(
-            self._API_BASE_URL + 'channels/%s/acasts?page=%s' % (channel_slug, page),
-            channel_slug, note='Download page %d of channel data' % page)
-        for cast in casts:
-            yield self.url_result(
-                'https://play.acast.com/s/%s/%s' % (channel_slug, cast['url']),
-                'ACast', cast['id'])
-
     def _real_extract(self, url):
-        channel_slug = self._match_id(url)
-        channel_data = self._download_json(
-            self._API_BASE_URL + 'channels/%s' % channel_slug, channel_slug)
-        entries = OnDemandPagedList(functools.partial(
-            self._fetch_page, channel_slug), self._PAGE_SIZE)
-        return self.playlist_result(entries, compat_str(
-            channel_data['id']), channel_data['name'], channel_data.get('description'))
+        show_slug = self._match_id(url)
+        show = self._call_api(show_slug, show_slug)
+        show_info = self._extract_show_info(show)
+        entries = []
+        for episode in (show.get('episodes') or []):
+            entries.append(self._extract_episode(episode, show_info))
+        return self.playlist_result(
+            entries, show.get('id'), show.get('title'), show.get('description'))
@@ -10,6 +10,7 @@ import random
 from .common import InfoExtractor
 from ..aes import aes_cbc_decrypt
 from ..compat import (
+    compat_HTTPError,
     compat_b64decode,
     compat_ord,
 )
@@ -18,11 +19,14 @@ from ..utils import (
     bytes_to_long,
     ExtractorError,
     float_or_none,
+    int_or_none,
     intlist_to_bytes,
     long_to_bytes,
     pkcs1pad,
     strip_or_none,
-    urljoin,
+    try_get,
+    unified_strdate,
+    urlencode_postdata,
 )


@@ -31,16 +35,30 @@ class ADNIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
     _TEST = {
         'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
-        'md5': 'e497370d847fd79d9d4c74be55575c7a',
+        'md5': '0319c99885ff5547565cacb4f3f9348d',
         'info_dict': {
             'id': '7778',
             'ext': 'mp4',
-            'title': 'Blue Exorcist - Kyôto Saga - Épisode 1',
+            'title': 'Blue Exorcist - Kyôto Saga - Episode 1',
             'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',
+            'series': 'Blue Exorcist - Kyôto Saga',
+            'duration': 1467,
+            'release_date': '20170106',
+            'comment_count': int,
+            'average_rating': float,
+            'season_number': 2,
+            'episode': 'Début des hostilités',
+            'episode_number': 1,
         }
     }
+
+    _NETRC_MACHINE = 'animedigitalnetwork'
     _BASE_URL = 'http://animedigitalnetwork.fr'
-    _RSA_KEY = (0xc35ae1e4356b65a73b551493da94b8cb443491c0aa092a357a5aee57ffc14dda85326f42d716e539a34542a0d3f363adf16c5ec222d713d5997194030ee2e4f0d1fb328c01a81cf6868c090d50de8e169c6b13d1675b9eeed1cbc51e1fffca9b38af07f37abd790924cd3bee59d0257cfda4fe5f3f0534877e21ce5821447d1b, 65537)
+    _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'
+    _PLAYER_BASE_URL = _API_BASE_URL + 'player/'
+    _HEADERS = {}
+    _LOGIN_ERR_MESSAGE = 'Unable to log in'
+    _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
     _POS_ALIGN_MAP = {
         'start': 1,
         'end': 3,
@@ -54,26 +72,24 @@ class ADNIE(InfoExtractor):
     def _ass_subtitles_timecode(seconds):
         return '%01d:%02d:%02d.%02d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 100)

-    def _get_subtitles(self, sub_path, video_id):
-        if not sub_path:
+    def _get_subtitles(self, sub_url, video_id):
+        if not sub_url:
             return None

         enc_subtitles = self._download_webpage(
-            urljoin(self._BASE_URL, sub_path),
-            video_id, 'Downloading subtitles location', fatal=False) or '{}'
+            sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}'
         subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')
         if subtitle_location:
             enc_subtitles = self._download_webpage(
-                urljoin(self._BASE_URL, subtitle_location),
-                video_id, 'Downloading subtitles data', fatal=False,
-                headers={'Origin': 'https://animedigitalnetwork.fr'})
+                subtitle_location, video_id, 'Downloading subtitles data',
+                fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})
         if not enc_subtitles:
             return None

         # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
         dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
             bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
-            bytes_to_intlist(binascii.unhexlify(self._K + '4b8ef13ec1872730')),
+            bytes_to_intlist(binascii.unhexlify(self._K + 'ab9f52f5baae7c72')),
             bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
         ))
         subtitles_json = self._parse_json(
@@ -117,61 +133,100 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
         }])
         return subtitles

+    def _real_initialize(self):
+        username, password = self._get_login_info()
+        if not username:
+            return
+        try:
+            access_token = (self._download_json(
+                self._API_BASE_URL + 'authentication/login', None,
+                'Logging in', self._LOGIN_ERR_MESSAGE, fatal=False,
+                data=urlencode_postdata({
+                    'password': password,
+                    'rememberMe': False,
+                    'source': 'Web',
+                    'username': username,
+                })) or {}).get('accessToken')
+            if access_token:
+                self._HEADERS = {'authorization': 'Bearer ' + access_token}
+        except ExtractorError as e:
+            message = None
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+                resp = self._parse_json(
+                    e.cause.read().decode(), None, fatal=False) or {}
+                message = resp.get('message') or resp.get('code')
+            self.report_warning(message or self._LOGIN_ERR_MESSAGE)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        player_config = self._parse_json(self._search_regex(
-            r'playerConfig\s*=\s*({.+});', webpage,
-            'player config', default='{}'), video_id, fatal=False)
-        if not player_config:
-            config_url = urljoin(self._BASE_URL, self._search_regex(
-                r'(?:id="player"|class="[^"]*adn-player-container[^"]*")[^>]+data-url="([^"]+)"',
-                webpage, 'config url'))
-            player_config = self._download_json(
-                config_url, video_id,
-                'Downloading player config JSON metadata')['player']
+        video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
+        player = self._download_json(
+            video_base_url + 'configuration', video_id,
+            'Downloading player config JSON metadata',
+            headers=self._HEADERS)['player']
+        options = player['options']

-        video_info = {}
-        video_info_str = self._search_regex(
-            r'videoInfo\s*=\s*({.+});', webpage,
-            'video info', fatal=False)
-        if video_info_str:
-            video_info = self._parse_json(
-                video_info_str, video_id, fatal=False) or {}
+        user = options['user']
+        if not user.get('hasAccess'):
+            self.raise_login_required()

-        options = player_config.get('options') or {}
-        metas = options.get('metas') or {}
-        links = player_config.get('links') or {}
-        sub_path = player_config.get('subtitles')
-        error = None
-        if not links:
-            links_url = player_config.get('linksurl') or options['videoUrl']
-            token = options['token']
-            self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
-            message = bytes_to_intlist(json.dumps({
-                'k': self._K,
-                'e': 60,
-                't': token,
-            }))
+        token = self._download_json(
+            user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),
+            video_id, 'Downloading access token', headers={
+                'x-player-refresh-token': user['refreshToken']
+            }, data=b'')['token']
+
+        links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
+        self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
+        message = bytes_to_intlist(json.dumps({
+            'k': self._K,
+            't': token,
+        }))
+
+        # Sometimes authentication fails for no good reason, retry with
+        # a different random padding
+        links_data = None
+        for _ in range(3):
             padded_message = intlist_to_bytes(pkcs1pad(message, 128))
             n, e = self._RSA_KEY
             encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))
             authorization = base64.b64encode(encrypted_message).decode()
-            links_data = self._download_json(
-                urljoin(self._BASE_URL, links_url), video_id,
-                'Downloading links JSON metadata', headers={
-                    'Authorization': 'Bearer ' + authorization,
-                })
-            links = links_data.get('links') or {}
-            metas = metas or links_data.get('meta') or {}
-            sub_path = sub_path or links_data.get('subtitles') or \
-                'index.php?option=com_vodapi&task=subtitles.getJSON&format=json&id=' + video_id
-            sub_path += '&token=' + token
-            error = links_data.get('error')
-        title = metas.get('title') or video_info['title']
+
+            try:
+                links_data = self._download_json(
+                    links_url, video_id, 'Downloading links JSON metadata', headers={
+                        'X-Player-Token': authorization
+                    }, query={
+                        'freeWithAds': 'true',
+                        'adaptive': 'false',
+                        'withMetadata': 'true',
+                        'source': 'Web'
+                    })
+                break
+            except ExtractorError as e:
+                if not isinstance(e.cause, compat_HTTPError):
+                    raise e
+
+                if e.cause.code == 401:
+                    # This usually goes away with a different random pkcs1pad, so retry
+                    continue
+
+                error = self._parse_json(e.cause.read(), video_id)
+                message = error.get('message')
+                if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
+                    self.raise_geo_restricted(msg=message)
+                raise ExtractorError(message)
+        else:
+            raise ExtractorError('Giving up retrying')
+
+        links = links_data.get('links') or {}
+        metas = links_data.get('metadata') or {}
+        sub_url = (links.get('subtitles') or {}).get('all')
+        video_info = links_data.get('video') or {}
+        title = metas['title']

         formats = []
-        for format_id, qualities in links.items():
+        for format_id, qualities in (links.get('streaming') or {}).items():
             if not isinstance(qualities, dict):
                 continue
             for quality, load_balancer_url in qualities.items():
@@ -189,19 +244,26 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             for f in m3u8_formats:
                 f['language'] = 'fr'
             formats.extend(m3u8_formats)
-        if not error:
-            error = options.get('error')
-        if not formats and error:
-            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)
         self._sort_formats(formats)

+        video = (self._download_json(
+            self._API_BASE_URL + 'video/%s' % video_id, video_id,
+            'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
+        show = video.get('show') or {}
+
         return {
             'id': video_id,
             'title': title,
-            'description': strip_or_none(metas.get('summary') or video_info.get('resume')),
-            'thumbnail': video_info.get('image'),
+            'description': strip_or_none(metas.get('summary') or video.get('summary')),
+            'thumbnail': video_info.get('image') or player.get('image'),
             'formats': formats,
-            'subtitles': self.extract_subtitles(sub_path, video_id),
-            'episode': metas.get('subtitle') or video_info.get('videoTitle'),
-            'series': video_info.get('playlistTitle'),
+            'subtitles': self.extract_subtitles(sub_url, video_id),
+            'episode': metas.get('subtitle') or video.get('name'),
+            'episode_number': int_or_none(video.get('shortNumber')),
+            'series': show.get('title'),
+            'season_number': int_or_none(video.get('season')),
+            'duration': int_or_none(video_info.get('duration') or video.get('duration')),
+            'release_date': unified_strdate(video.get('releaseDate')),
+            'average_rating': float_or_none(video.get('rating') or metas.get('rating')),
+            'comment_count': int_or_none(video.get('commentsCount')),
         }
@@ -6,6 +6,7 @@ import re
 from .theplatform import ThePlatformIE
 from ..utils import (
     ExtractorError,
+    GeoRestrictedError,
     int_or_none,
     update_url_query,
     urlencode_postdata,
@@ -19,8 +20,8 @@ class AENetworksBaseIE(ThePlatformIE):
                             (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
                             fyi\.tv
                         )/'''
-    _THEPLATFORM_KEY = 'crazyjava'
-    _THEPLATFORM_SECRET = 's3cr3t'
+    _THEPLATFORM_KEY = '43jXaGRQud'
+    _THEPLATFORM_SECRET = 'S10BPXHMlb'
     _DOMAIN_MAP = {
         'history.com': ('HISTORY', 'history'),
         'aetv.com': ('AETV', 'aetv'),
@@ -28,6 +29,7 @@ class AENetworksBaseIE(ThePlatformIE):
         'lifetimemovieclub.com': ('LIFETIMEMOVIECLUB', 'lmc'),
         'fyi.tv': ('FYI', 'fyi'),
         'historyvault.com': (None, 'historyvault'),
+        'biography.com': (None, 'biography'),
     }

     def _extract_aen_smil(self, smil_url, video_id, auth=None):
@@ -54,6 +56,8 @@ class AENetworksBaseIE(ThePlatformIE):
                 tp_formats, tp_subtitles = self._extract_theplatform_smil(
                     m_url, video_id, 'Downloading %s SMIL data' % (q.get('switch') or q['assetTypes']))
             except ExtractorError as e:
+                if isinstance(e, GeoRestrictedError):
+                    raise
                 last_e = e
                 continue
             formats.extend(tp_formats)
@@ -67,6 +71,34 @@ class AENetworksBaseIE(ThePlatformIE):
             'subtitles': subtitles,
         }
+
+    def _extract_aetn_info(self, domain, filter_key, filter_value, url):
+        requestor_id, brand = self._DOMAIN_MAP[domain]
+        result = self._download_json(
+            'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
+            filter_value, query={'filter[%s]' % filter_key: filter_value})['results'][0]
+        title = result['title']
+        video_id = result['id']
+        media_url = result['publicUrl']
+        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
+            r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
+        info = self._parse_theplatform_metadata(theplatform_metadata)
+        auth = None
+        if theplatform_metadata.get('AETN$isBehindWall'):
+            resource = self._get_mvpd_resource(
+                requestor_id, theplatform_metadata['title'],
+                theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
+                theplatform_metadata['ratings'][0]['rating'])
+            auth = self._extract_mvpd_auth(
+                url, video_id, requestor_id, resource)
+        info.update(self._extract_aen_smil(media_url, video_id, auth))
+        info.update({
+            'title': title,
+            'series': result.get('seriesName'),
+            'season_number': int_or_none(result.get('tvSeasonNumber')),
+            'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')),
+        })
+        return info


 class AENetworksIE(AENetworksBaseIE):
     IE_NAME = 'aenetworks'
@@ -139,32 +171,7 @@ class AENetworksIE(AENetworksBaseIE):

     def _real_extract(self, url):
         domain, canonical = re.match(self._VALID_URL, url).groups()
-        requestor_id, brand = self._DOMAIN_MAP[domain]
-        result = self._download_json(
-            'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
-            canonical, query={'filter[canonical]': '/' + canonical})['results'][0]
-        title = result['title']
-        video_id = result['id']
-        media_url = result['publicUrl']
-        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
-            r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
-        info = self._parse_theplatform_metadata(theplatform_metadata)
-        auth = None
-        if theplatform_metadata.get('AETN$isBehindWall'):
-            resource = self._get_mvpd_resource(
-                requestor_id, theplatform_metadata['title'],
-                theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
-                theplatform_metadata['ratings'][0]['rating'])
-            auth = self._extract_mvpd_auth(
-                url, video_id, requestor_id, resource)
-        info.update(self._extract_aen_smil(media_url, video_id, auth))
-        info.update({
-            'title': title,
-            'series': result.get('seriesName'),
-            'season_number': int_or_none(result.get('tvSeasonNumber')),
-            'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')),
-        })
-        return info
+        return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)


 class AENetworksListBaseIE(AENetworksBaseIE):
@@ -245,11 +252,11 @@ class AENetworksShowIE(AENetworksListBaseIE):
     _TESTS = [{
         'url': 'http://www.history.com/shows/ancient-aliens',
         'info_dict': {
-            'id': 'SH012427480000',
+            'id': 'SERIES1574',
             'title': 'Ancient Aliens',
             'description': 'md5:3f6d74daf2672ff3ae29ed732e37ea7f',
         },
-        'playlist_mincount': 168,
+        'playlist_mincount': 150,
     }]
     _RESOURCE = 'series'
     _ITEMS_KEY = 'episodes'
@@ -294,3 +301,42 @@ class HistoryTopicIE(AENetworksBaseIE):
         return self.url_result(
             'http://www.history.com/videos/' + display_id,
             AENetworksIE.ie_key())
+
+
+class HistoryPlayerIE(AENetworksBaseIE):
+    IE_NAME = 'history:player'
+    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:history|biography)\.com)/player/(?P<id>\d+)'
+    _TESTS = []
+
+    def _real_extract(self, url):
+        domain, video_id = re.match(self._VALID_URL, url).groups()
+        return self._extract_aetn_info(domain, 'id', video_id, url)
+
+
+class BiographyIE(AENetworksBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?biography\.com/video/(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://www.biography.com/video/vincent-van-gogh-full-episode-2075049808',
+        'info_dict': {
+            'id': '30322987',
+            'ext': 'mp4',
+            'title': 'Vincent Van Gogh - Full Episode',
+            'description': 'A full biography about the most influential 20th century painter, Vincent Van Gogh.',
+            'timestamp': 1311970571,
+            'upload_date': '20110729',
+            'uploader': 'AENE-NEW',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+        'add_ie': ['ThePlatform'],
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        player_url = self._search_regex(
+            r'<phoenix-iframe[^>]+src="(%s)' % HistoryPlayerIE._VALID_URL,
+            webpage, 'player URL')
+        return self.url_result(player_url, HistoryPlayerIE.ie_key())
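For reference, a minimal standalone sketch of the feeds.video.aetnd.com lookup that the new _extract_aetn_info() helper centralises. The endpoint, brand slug and filter syntax come from the hunk above; the use of the requests library and the example canonical path are illustrative assumptions, not part of the change.

import requests

def aen_find_video(brand, filter_key, filter_value):
    # Same query the extractor issues, e.g. filter[canonical]=/shows/... or filter[id]=...
    results = requests.get(
        'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
        params={'filter[%s]' % filter_key: filter_value}).json()['results']
    # The first result carries 'title', 'id' and the ThePlatform 'publicUrl'
    return results[0]

# e.g. aen_find_video('history', 'canonical', '/shows/ancient-aliens')  # path is hypothetical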
@@ -18,7 +18,7 @@ class AliExpressLiveIE(InfoExtractor):
             'id': '2800002704436634',
             'ext': 'mp4',
             'title': 'CASIMA7.22',
-            'thumbnail': r're:http://.*\.jpg',
+            'thumbnail': r're:https?://.*\.jpg',
             'uploader': 'CASIMA Official Store',
             'timestamp': 1500717600,
             'upload_date': '20170722',
@@ -1,13 +1,16 @@
 from __future__ import unicode_literals

+import json
+import re
+
 from .common import InfoExtractor


 class AlJazeeraIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?:programmes|video)/.*?/(?P<id>[^/]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?P<type>program/[^/]+|(?:feature|video)s)/\d{4}/\d{1,2}/\d{1,2}/(?P<id>[^/?&#]+)'

     _TESTS = [{
-        'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
+        'url': 'https://www.aljazeera.com/program/episode/2014/9/19/deliverance',
         'info_dict': {
             'id': '3792260579001',
             'ext': 'mp4',
@@ -20,14 +23,34 @@ class AlJazeeraIE(InfoExtractor):
         'add_ie': ['BrightcoveNew'],
         'skip': 'Not accessible from Travis CI server',
     }, {
-        'url': 'http://www.aljazeera.com/video/news/2017/05/sierra-leone-709-carat-diamond-auctioned-170511100111930.html',
+        'url': 'https://www.aljazeera.com/videos/2017/5/11/sierra-leone-709-carat-diamond-to-be-auctioned-off',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.aljazeera.com/features/2017/8/21/transforming-pakistans-buses-into-art',
         'only_matching': True,
     }]
-    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/665003303001/default_default/index.html?videoId=%s'
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'

     def _real_extract(self, url):
-        program_name = self._match_id(url)
-        webpage = self._download_webpage(url, program_name)
-        brightcove_id = self._search_regex(
-            r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')
-        return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
+        post_type, name = re.match(self._VALID_URL, url).groups()
+        post_type = {
+            'features': 'post',
+            'program': 'episode',
+            'videos': 'video',
+        }[post_type.split('/')[0]]
+        video = self._download_json(
+            'https://www.aljazeera.com/graphql', name, query={
+                'operationName': 'SingleArticleQuery',
+                'variables': json.dumps({
+                    'name': name,
+                    'postType': post_type,
+                }),
+            }, headers={
+                'wp-site': 'aje',
+            })['data']['article']['video']
+        video_id = video['id']
+        account_id = video.get('accountId') or '665003303001'
+        player_id = video.get('playerId') or 'BkeSH5BDb'
+        return self.url_result(
+            self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),
+            'BrightcoveNew', video_id)
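A minimal standalone sketch of the GraphQL lookup the new AlJazeeraIE performs. The endpoint, operation name, variables and the 'wp-site: aje' header are taken from the hunk above; the requests library and the assumption that nothing beyond data.article.video is needed are mine.

import json
import requests

def aljazeera_brightcove_id(name, post_type):
    # post_type is 'post', 'episode' or 'video', as mapped in the extractor
    resp = requests.get(
        'https://www.aljazeera.com/graphql',
        params={
            'operationName': 'SingleArticleQuery',
            'variables': json.dumps({'name': name, 'postType': post_type}),
        },
        headers={'wp-site': 'aje'})
    resp.raise_for_status()
    return resp.json()['data']['article']['video']['id']

# e.g. aljazeera_brightcove_id('deliverance', 'episode')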
youtube_dl/extractor/alsace20tv.py (new file, 89 lines)

# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    dict_get,
    get_element_by_class,
    int_or_none,
    unified_strdate,
    url_or_none,
)


class Alsace20TVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/(?:[\w-]+/)+[\w-]+-(?P<id>[\w]+)'
    _TESTS = [{
        'url': 'https://www.alsace20.tv/VOD/Actu/JT/Votre-JT-jeudi-3-fevrier-lyNHCXpYJh.html',
        # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
        'info_dict': {
            'id': 'lyNHCXpYJh',
            'ext': 'mp4',
            'description': 'md5:fc0bc4a0692d3d2dba4524053de4c7b7',
            'title': 'Votre JT du jeudi 3 février',
            'upload_date': '20220203',
            'thumbnail': r're:https?://.+\.jpg',
            'duration': 1073,
            'view_count': int,
        },
        'params': {
            'format': 'bestvideo',
        },
    }]

    def _extract_video(self, video_id, url=None):
        info = self._download_json(
            'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ),
            video_id) or {}
        title = info['titre']

        formats = []
        for res, fmt_url in (info.get('files') or {}).items():
            formats.extend(
                self._extract_smil_formats(fmt_url, video_id, fatal=False)
                if '/smil:_' in fmt_url
                else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False))
        self._sort_formats(formats)

        webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''
        thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage))
        upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
        upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': clean_html(get_element_by_class('wysiwyg', webpage)),
            'upload_date': upload_date,
            'thumbnail': thumbnail,
            'duration': int_or_none(self._og_search_property('video:duration', webpage) if webpage else None),
            'view_count': int_or_none(info.get('nb_vues')),
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._extract_video(video_id, url)


class Alsace20TVEmbedIE(Alsace20TVIE):
    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/emb/(?P<id>[\w]+)'
    _TESTS = [{
        'url': 'https://www.alsace20.tv/emb/lyNHCXpYJh',
        # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
        'info_dict': {
            'id': 'lyNHCXpYJh',
            'ext': 'mp4',
            'title': 'Votre JT du jeudi 3 février',
            'upload_date': '20220203',
            'thumbnail': r're:https?://.+\.jpg',
            'view_count': int,
        },
        'params': {
            'format': 'bestvideo',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._extract_video(video_id)
@@ -80,7 +80,8 @@ class AMCNetworksIE(ThePlatformIE):
         title = theplatform_metadata['title']
         rating = try_get(
             theplatform_metadata, lambda x: x['ratings'][0]['rating'])
-        if properties.get('videoCategory') == 'TVE-Auth':
+        video_category = properties.get('videoCategory')
+        if video_category and video_category.endswith('-Auth'):
             resource = self._get_mvpd_resource(
                 requestor_id, title, video_id, rating)
             query['auth'] = self._extract_mvpd_auth(
@@ -1,13 +1,16 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import json
 import re

 from .common import InfoExtractor
 from ..utils import (
     clean_html,
+    int_or_none,
     try_get,
     unified_strdate,
+    unified_timestamp,
 )


@@ -22,8 +25,8 @@ class AmericasTestKitchenIE(InfoExtractor):
             'ext': 'mp4',
             'description': 'md5:64e606bfee910627efc4b5f050de92b3',
             'thumbnail': r're:^https?://',
-            'timestamp': 1523664000,
-            'upload_date': '20180414',
+            'timestamp': 1523318400,
+            'upload_date': '20180410',
             'release_date': '20180410',
             'series': "America's Test Kitchen",
             'season_number': 18,
@@ -33,6 +36,27 @@ class AmericasTestKitchenIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+    }, {
+        # Metadata parsing behaves differently for newer episodes (705) as opposed to older episodes (582 above)
+        'url': 'https://www.americastestkitchen.com/episode/705-simple-chicken-dinner',
+        'md5': '06451608c57651e985a498e69cec17e5',
+        'info_dict': {
+            'id': '5fbe8c61bda2010001c6763b',
+            'title': 'Simple Chicken Dinner',
+            'ext': 'mp4',
+            'description': 'md5:eb68737cc2fd4c26ca7db30139d109e7',
+            'thumbnail': r're:^https?://',
+            'timestamp': 1610755200,
+            'upload_date': '20210116',
+            'release_date': '20210116',
+            'series': "America's Test Kitchen",
+            'season_number': 21,
+            'episode': 'Simple Chicken Dinner',
+            'episode_number': 3,
+        },
+        'params': {
+            'skip_download': True,
+        },
     }, {
         'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
         'only_matching': True,
@@ -60,7 +84,76 @@ class AmericasTestKitchenIE(InfoExtractor):
             'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'],
             'ie_key': 'Zype',
             'description': clean_html(video.get('description')),
+            'timestamp': unified_timestamp(video.get('publishDate')),
             'release_date': unified_strdate(video.get('publishDate')),
+            'episode_number': int_or_none(episode.get('number')),
+            'season_number': int_or_none(episode.get('season')),
             'series': try_get(episode, lambda x: x['show']['title']),
             'episode': episode.get('title'),
         }
+
+
+class AmericasTestKitchenSeasonIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|cookscountry)\.com/episodes/browse/season_(?P<id>\d+)'
+    _TESTS = [{
+        # ATK Season
+        'url': 'https://www.americastestkitchen.com/episodes/browse/season_1',
+        'info_dict': {
+            'id': 'season_1',
+            'title': 'Season 1',
+        },
+        'playlist_count': 13,
+    }, {
+        # Cooks Country Season
+        'url': 'https://www.cookscountry.com/episodes/browse/season_12',
+        'info_dict': {
+            'id': 'season_12',
+            'title': 'Season 12',
+        },
+        'playlist_count': 13,
+    }]
+
+    def _real_extract(self, url):
+        show_name, season_number = re.match(self._VALID_URL, url).groups()
+        season_number = int(season_number)
+
+        slug = 'atk' if show_name == 'americastestkitchen' else 'cco'
+
+        season = 'Season %d' % season_number
+
+        season_search = self._download_json(
+            'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
+            season, headers={
+                'Origin': 'https://www.%s.com' % show_name,
+                'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
+                'X-Algolia-Application-Id': 'Y1FNZXUI30',
+            }, query={
+                'facetFilters': json.dumps([
+                    'search_season_list:' + season,
+                    'search_document_klass:episode',
+                    'search_show_slug:' + slug,
+                ]),
+                'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title' % slug,
+                'attributesToHighlight': '',
+                'hitsPerPage': 1000,
+            })
+
+        def entries():
+            for episode in (season_search.get('hits') or []):
+                search_url = episode.get('search_url')
+                if not search_url:
+                    continue
+                yield {
+                    '_type': 'url',
+                    'url': 'https://www.%s.com%s' % (show_name, search_url),
+                    'id': try_get(episode, lambda e: e['objectID'].split('_')[-1]),
+                    'title': episode.get('title'),
+                    'description': episode.get('description'),
+                    'timestamp': unified_timestamp(episode.get('search_document_date')),
+                    'season_number': season_number,
+                    'episode_number': int_or_none(episode.get('search_%s_episode_number' % slug)),
+                    'ie_key': AmericasTestKitchenIE.ie_key(),
+                }
+
+        return self.playlist_result(
+            entries(), 'season_%d' % season_number, season)
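A minimal standalone sketch of the Algolia facet query used by the new AmericasTestKitchenSeasonIE. The index name, API key, application id and facet filters are copied from the hunk above; the requests library and the assumption that only the 'hits' field matters are mine.

import json
import requests

def atk_season_hits(season_number, slug='atk', show='americastestkitchen'):
    season = 'Season %d' % season_number
    resp = requests.get(
        'https://y1fnzxui30-dsn.algolia.net/1/indexes/'
        'everest_search_%s_season_desc_production' % slug,
        params={
            'facetFilters': json.dumps([
                'search_season_list:' + season,
                'search_document_klass:episode',
                'search_show_slug:' + slug,
            ]),
            'attributesToRetrieve': 'description,search_%s_episode_number,'
                                    'search_document_date,search_url,title' % slug,
            'attributesToHighlight': '',
            'hitsPerPage': 1000,
        },
        headers={
            'Origin': 'https://www.%s.com' % show,
            'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
            'X-Algolia-Application-Id': 'Y1FNZXUI30',
        })
    # Each hit carries search_url, title, description and the episode number
    return resp.json().get('hits') or []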
@@ -8,6 +8,7 @@ from ..utils import (
     int_or_none,
     mimetype2ext,
     parse_iso8601,
+    unified_timestamp,
     url_or_none,
 )

@@ -88,7 +89,7 @@ class AMPIE(InfoExtractor):

         self._sort_formats(formats)

-        timestamp = parse_iso8601(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))
+        timestamp = unified_timestamp(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))

         return {
             'id': video_id,
@@ -116,8 +116,6 @@ class AnimeOnDemandIE(InfoExtractor):
             r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
             webpage, 'anime description', default=None)

-        entries = []
-
         def extract_info(html, video_id, num=None):
             title, description = [None] * 2
             formats = []
@@ -233,7 +231,7 @@ class AnimeOnDemandIE(InfoExtractor):
                 self._sort_formats(info['formats'])
                 f = common_info.copy()
                 f.update(info)
-                entries.append(f)
+                yield f

                 # Extract teaser/trailer only when full episode is not available
                 if not info['formats']:
@@ -247,7 +245,7 @@ class AnimeOnDemandIE(InfoExtractor):
                         'title': m.group('title'),
                         'url': urljoin(url, m.group('href')),
                     })
-                    entries.append(f)
+                    yield f

         def extract_episodes(html):
             for num, episode_html in enumerate(re.findall(
@@ -275,7 +273,8 @@ class AnimeOnDemandIE(InfoExtractor):
                    'episode_number': episode_number,
                }

-                extract_entries(episode_html, video_id, common_info)
+                for e in extract_entries(episode_html, video_id, common_info):
+                    yield e

         def extract_film(html, video_id):
             common_info = {
@@ -283,11 +282,18 @@ class AnimeOnDemandIE(InfoExtractor):
                 'title': anime_title,
                 'description': anime_description,
             }
-            extract_entries(html, video_id, common_info)
+            for e in extract_entries(html, video_id, common_info):
+                yield e

-        extract_episodes(webpage)
+        def entries():
+            has_episodes = False
+            for e in extract_episodes(webpage):
+                has_episodes = True
+                yield e

-        if not entries:
-            extract_film(webpage, anime_id)
+            if not has_episodes:
+                for e in extract_film(webpage, anime_id):
+                    yield e

-        return self.playlist_result(entries, anime_id, anime_title, anime_description)
+        return self.playlist_result(
+            entries(), anime_id, anime_title, anime_description)
@@ -116,7 +116,76 @@ class AnvatoIE(InfoExtractor):
         'anvato_scripps_app_ios_prod_409c41960c60b308db43c3cc1da79cab9f1c3d93': 'WPxj5GraLTkYCyj3M7RozLqIycjrXOEcDGFMIJPn',
         'EZqvRyKBJLrgpClDPDF8I7Xpdp40Vx73': '4OxGd2dEakylntVKjKF0UK9PDPYB6A9W',
         'M2v78QkpleXm9hPp9jUXI63x5vA6BogR': 'ka6K32k7ZALmpINkjJUGUo0OE42Md1BQ',
-        'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ'
+        'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ',
+        'X8POa4zPPaKVZHqmWjuEzfP31b1QM9VN': 'Dn5vOY9ooDw7VSl9qztjZI5o0g08mA0z',
+        'M2v78QkBMpNJlSPp9diX5F2PBmBy6Bog': 'ka6K32kyo7nDZfNkjQCGWf1lpApXMd1B',
+        'bvJ0dQpav07l0hG5JgfVLF2dv1vARwpP': 'BzoQW24GrJZoJfmNodiJKSPeB9B8NOxj',
+        'lxQMLg2XZKuEZaWgsqubBxV9INZ6bryY': 'Vm2Mx6noKds9jB71h6urazwlTG3m9x8l',
+        '04EnjvXeoSmkbJ9ckPs7oY0mcxv7PlyN': 'aXERQP9LMfQVlEDsgGs6eEA1SWznAQ8P',
+        'mQbO2ge6BFRWVPYCYpU06YvNt80XLvAX': 'E2BV1NGmasN5v7eujECVPJgwflnLPm2A',
+        'g43oeBzJrCml7o6fa5fRL1ErCdeD8z4K': 'RX34mZ6zVH4Nr6whbxIGLv9WSbxEKo8V',
+        'VQrDJoP7mtdBzkxhXbSPwGB1coeElk4x': 'j2VejQx0VFKQepAF7dI0mJLKtOVJE18z',
+        'WxA5NzLRjCrmq0NUgaU5pdMDuZO7RJ4w': 'lyY5ADLKaIOLEgAsGQCveEMAcqnx3rY9',
+        'M4lpMXB71ie0PjMCjdFzVXq0SeRVqz49': 'n2zVkOqaLIv3GbLfBjcwW51LcveWOZ2e',
+        'dyDZGEqN8u8nkJZcJns0oxYmtP7KbGAn': 'VXOEqQW9BtEVLajfZQSLEqxgS5B7qn2D',
+        'E7QNjrVY5u5mGvgu67IoDgV1CjEND8QR': 'rz8AaDmdKIkLmPNhB5ILPJnjS5PnlL8d',
+        'a4zrqjoKlfzg0dwHEWtP31VqcLBpjm4g': 'LY9J16gwETdGWa3hjBu5o0RzuoQDjqXQ',
+        'dQP5BZroMsMVLO1hbmT5r2Enu86GjxA6': '7XR3oOdbPF6x3PRFLDCq9RkgsRjAo48V',
+        'M4lKNBO1NFe0PjMCj1tzVXq0SeRVqzA9': 'n2zoRqGLRUv3GbLfBmTwW51LcveWOZYe',
+        'nAZ7MZdpGCGg1pqFEbsoJOz2C60mv143': 'dYJgdqA9aT4yojETqGi7yNgoFADxqmXP',
+        '3y1MERYgOuE9NzbFgwhV6Wv2F0YKvbyz': '081xpZDQgC4VadLTavhWQxrku56DAgXV',
+        'bmQvmEXr5HWklBMCZOcpE2Z3HBYwqGyl': 'zxXPbVNyMiMAZldhr9FkOmA0fl4aKr2v',
+        'wA7oDNYldfr6050Hwxi52lPZiVlB86Ap': 'ZYK16aA7ni0d3l3c34uwpxD7CbReMm8Q',
+        'g43MbKMWmFml7o7sJoSRkXxZiXRvJ3QK': 'RX3oBJonvs4Nr6rUWBCGn3matRGqJPXV',
+        'mA9VdlqpLS0raGaSDvtoqNrBTzb8XY4q': '0XN4OjBD3fnW7r7IbmtJB4AyfOmlrE2r',
+        'mAajOwgkGt17oGoFmEuklMP9H0GnW54d': 'lXbBLPGyzikNGeGujAuAJGjZiwLRxyXR',
+        'vy8vjJ9kbUwrRqRu59Cj5dWZfzYErlAb': 'K8l7gpwaGcBpnAnCLNCmPZRdin3eaQX0',
+        'xQMWBpR8oHEZaWaSMGUb0avOHjLVYn4Y': 'm2MrN4vEaf9jB7BFy5Srb40jTrN67AYl',
+        'xyKEmVO3miRr6D6UVkt7oB8jtD6aJEAv': 'g2ddDebqDfqdgKgswyUKwGjbTWwzq923',
+        '7Qk0wa2D9FjKapacoJF27aLvUDKkLGA0': 'b2kgBEkephJaMkMTL7s1PLe4Ua6WyP2P',
+        '3QLg6nqmNTJ5VvVTo7f508LPidz1xwyY': 'g2L1GgpraipmAOAUqmIbBnPxHOmw4MYa',
+        '3y1B7zZjXTE9NZNSzZSVNPZaTNLjo6Qz': '081b5G6wzH4VagaURmcWbN5mT4JGEe2V',
+        'lAqnwvkw6SG6D8DSqmUg6DRLUp0w3G4x': 'O2pbP0xPDFNJjpjIEvcdryOJtpkVM4X5',
+        'awA7xd1N0Hr6050Hw2c52lPZiVlB864p': 'GZYKpn4aoT0d3l3c3PiwpxD7CbReMmXQ',
+        'jQVqPLl9YHL1WGWtR1HDgWBGT63qRNyV': '6X03ne6vrU4oWyWUN7tQVoajikxJR3Ye',
+        'GQRMR8mL7uZK797t7xH3eNzPIP5dOny1': 'm2vqPWGd4U31zWzSyasDRAoMT1PKRp8o',
+        'zydq9RdmRhXLkNkfNoTJlMzaF0lWekQB': '3X7LnvE7vH5nkEkSqLiey793Un7dLB8e',
+        'VQrDzwkB2IdBzjzu9MHPbEYkSB50gR4x': 'j2VebLzoKUKQeEesmVh0gM1eIp9jKz8z',
+        'mAa2wMamBs17oGoFmktklMP9H0GnW54d': 'lXbgP74xZTkNGeGujVUAJGjZiwLRxy8R',
+        '7yjB6ZLG6sW8R6RF2xcan1KGfJ5dNoyd': 'wXQkPorvPHZ45N5t4Jf6qwg5Tp4xvw29',
+        'a4zPpNeWGuzg0m0iX3tPeanGSkRKWXQg': 'LY9oa3QAyHdGW9Wu3Ri5JGeEik7l1N8Q',
+        'k2rneA2M38k25cXDwwSknTJlxPxQLZ6M': '61lyA2aEVDzklfdwmmh31saPxQx2VRjp',
+        'bK9Zk4OvPnvxduLgxvi8VUeojnjA02eV': 'o5jANYjbeMb4nfBaQvcLAt1jzLzYx6ze',
+        '5VD6EydM3R9orHmNMGInGCJwbxbQvGRw': 'w3zjmX7g4vnxzCxElvUEOiewkokXprkZ',
+        '70X35QbVYVYNPUmP9YfbzI06YqYQk2R1': 'vG4Aj2BMjMjoztB7zeFOnCVPJpJ8lMOa',
+        '26qYwQVG9p1Bks2GgBckjfDJOXOAMgG1': 'r4ev9X0mv5zqJc0yk5IBDcQOwZw8mnwQ',
+        'rvVKpA56MBXWlSxMw3cobT5pdkd4Dm7q': '1J7ZkY53pZ645c93owcLZuveE7E8B3rL',
+        'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo': 'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo',
+        'jdKqRGF16dKsBviMDae7IGDl7oTjEbVV': 'Q09l7vhlNxPFErIOK6BVCe7KnwUW5DVV',
+        '3QLkogW1OUJ5VvPsrDH56DY2u7lgZWyY': 'g2LRE1V9espmAOPhE4ubj4ZdUA57yDXa',
+        'wyJvWbXGBSdbkEzhv0CW8meou82aqRy8': 'M2wolPvyBIpQGkbT4juedD4ruzQGdK2y',
+        '7QkdZrzEkFjKap6IYDU2PB0oCNZORmA0': 'b2kN1l96qhJaMkPs9dt1lpjBfwqZoA8P',
+        'pvA05113MHG1w3JTYxc6DVlRCjErVz4O': 'gQXeAbblBUnDJ7vujbHvbRd1cxlz3AXO',
+        'mA9blJDZwT0raG1cvkuoeVjLC7ZWd54q': '0XN9jRPwMHnW7rvumgfJZOD9CJgVkWYr',
+        '5QwRN5qKJTvGKlDTmnf7xwNZcjRmvEy9': 'R2GP6LWBJU1QlnytwGt0B9pytWwAdDYy',
+        'eyn5rPPbkfw2KYxH32fG1q58CbLJzM40': 'p2gyqooZnS56JWeiDgfmOy1VugOQEBXn',
+        '3BABn3b5RfPJGDwilbHe7l82uBoR05Am': '7OYZG7KMVhbPdKJS3xcWEN3AuDlLNmXj',
+        'xA5zNGXD3HrmqMlF6OS5pdMDuZO7RJ4w': 'yY5DAm6r1IOLE3BCVMFveEMAcqnx3r29',
+        'g43PgW3JZfml7o6fDEURL1ErCdeD8zyK': 'RX3aQn1zrS4Nr6whDgCGLv9WSbxEKo2V',
+        'lAqp8WbGgiG6D8LTKJcg3O72CDdre1Qx': 'O2pnm6473HNJjpKuVosd3vVeh975yrX5',
+        'wyJbYEDxKSdbkJ6S6RhW8meou82aqRy8': 'M2wPm7EgRSpQGlAh70CedD4ruzQGdKYy',
+        'M4lgW28nLCe0PVdtaXszVXq0SeRVqzA9': 'n2zmJvg4jHv3G0ETNgiwW51LcveWOZ8e',
+        '5Qw3OVvp9FvGKlDTmOC7xwNZcjRmvEQ9': 'R2GzDdml9F1Qlnytw9s0B9pytWwAdD8y',
+        'vy8a98X7zCwrRqbHrLUjYzwDiK2b70Qb': 'K8lVwzyjZiBpnAaSGeUmnAgxuGOBxmY0',
+        'g4eGjJLLoiqRD3Pf9oT5O03LuNbLRDQp': '6XqD59zzpfN4EwQuaGt67qNpSyRBlnYy',
+        'g43OPp9boIml7o6fDOIRL1ErCdeD8z4K': 'RX33alNB4s4Nr6whDPUGLv9WSbxEKoXV',
+        'xA2ng9OkBcGKzDbTkKsJlx7dUK8R3dA5': 'z2aPnJvzBfObkwGC3vFaPxeBhxoMqZ8K',
+        'xyKEgBajZuRr6DEC0Kt7XpD1cnNW9gAv': 'g2ddlEBvRsqdgKaI4jUK9PrgfMexGZ23',
+        'BAogww51jIMa2JnH1BcYpXM5F658RNAL': 'rYWDmm0KptlkGv4FGJFMdZmjs9RDE6XR',
+        'BAokpg62VtMa2JnH1mHYpXM5F658RNAL': 'rYWryDnlNslkGv4FG4HMdZmjs9RDE62R',
+        'a4z1Px5e2hzg0m0iMMCPeanGSkRKWXAg': 'LY9eorNQGUdGW9WuKKf5JGeEik7l1NYQ',
+        'kAx69R58kF9nY5YcdecJdl2pFXP53WyX': 'gXyRxELpbfPvLeLSaRil0mp6UEzbZJ8L',
+        'BAoY13nwViMa2J2uo2cY6BlETgmdwryL': 'rYWwKzJmNFlkGvGtNoUM9bzwIJVzB1YR',
     }

     _MCP_TO_ACCESS_KEY_TABLE = {
@@ -189,19 +258,17 @@ class AnvatoIE(InfoExtractor):

         video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii')
         anvrid = md5_text(time.time() * 1000 * random.random())[:30]
-        payload = {
-            'api': {
-                'anvrid': anvrid,
-                'anvstk': md5_text('%s|%s|%d|%s' % (
-                    access_key, anvrid, server_time,
-                    self._ANVACK_TABLE.get(access_key, self._API_KEY))),
-                'anvts': server_time,
-            },
+        api = {
+            'anvrid': anvrid,
+            'anvts': server_time,
         }
+        api['anvstk'] = md5_text('%s|%s|%d|%s' % (
+            access_key, anvrid, server_time,
+            self._ANVACK_TABLE.get(access_key, self._API_KEY)))

         return self._download_json(
             video_data_url, video_id, transform_source=strip_jsonp,
-            data=json.dumps(payload).encode('utf-8'))
+            data=json.dumps({'api': api}).encode('utf-8'))

     def _get_anvato_videos(self, access_key, video_id):
         video_data = self._get_video_json(access_key, video_id)
@@ -259,7 +326,7 @@ class AnvatoIE(InfoExtractor):
             'description': video_data.get('def_description'),
             'tags': video_data.get('def_tags', '').split(','),
             'categories': video_data.get('categories'),
-            'thumbnail': video_data.get('thumbnail'),
+            'thumbnail': video_data.get('src_image_url') or video_data.get('thumbnail'),
             'timestamp': int_or_none(video_data.get(
                 'ts_published') or video_data.get('ts_added')),
             'uploader': video_data.get('mcp_id'),
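A minimal sketch of how the restructured hunk above derives the Anvato request body: 'anvstk' is an MD5 over access_key|anvrid|anvts|secret. hashlib stands in for youtube-dl's md5_text helper, and the local clock stands in for the server-supplied timestamp the extractor actually uses; both are assumptions for illustration only.

import hashlib
import random
import time

def build_anvato_api_payload(access_key, secret):
    # server_time approximated with the local clock; the extractor uses the API's value
    server_time = int(time.time())
    anvrid = hashlib.md5(
        str(time.time() * 1000 * random.random()).encode('utf-8')).hexdigest()[:30]
    api = {'anvrid': anvrid, 'anvts': server_time}
    api['anvstk'] = hashlib.md5(
        ('%s|%s|%d|%s' % (access_key, anvrid, server_time, secret)).encode('utf-8')
    ).hexdigest()
    return {'api': api}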
@@ -3,7 +3,7 @@ from __future__ import unicode_literals

 import re

-from .common import InfoExtractor
+from .yahoo import YahooIE
 from ..compat import (
     compat_parse_qs,
     compat_urllib_parse_urlparse,
@@ -15,9 +15,9 @@ from ..utils import (
 )


-class AolIE(InfoExtractor):
+class AolIE(YahooIE):
     IE_NAME = 'aol.com'
-    _VALID_URL = r'(?:aol-video:|https?://(?:www\.)?aol\.(?:com|ca|co\.uk|de|jp)/video/(?:[^/]+/)*)(?P<id>[0-9a-f]+)'
+    _VALID_URL = r'(?:aol-video:|https?://(?:www\.)?aol\.(?:com|ca|co\.uk|de|jp)/video/(?:[^/]+/)*)(?P<id>\d{9}|[0-9a-f]{24}|[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})'

     _TESTS = [{
         # video with 5min ID
@@ -76,10 +76,16 @@ class AolIE(InfoExtractor):
     }, {
         'url': 'https://www.aol.jp/video/playlist/5a28e936a1334d000137da0c/5a28f3151e642219fde19831/',
         'only_matching': True,
+    }, {
+        # Yahoo video
+        'url': 'https://www.aol.com/video/play/991e6700-ac02-11ea-99ff-357400036f61/24bbc846-3e30-3c46-915e-fe8ccd7fcc46/',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
+        if '-' in video_id:
+            return self._extract_yahoo_video(video_id, 'us')

         response = self._download_json(
             'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/%s/details' % video_id,
@@ -6,25 +6,21 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
-    js_to_json,
+    int_or_none,
     url_or_none,
 )


 class APAIE(InfoExtractor):
-    _VALID_URL = r'https?://[^/]+\.apa\.at/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _VALID_URL = r'(?P<base_url>https?://[^/]+\.apa\.at)/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
     _TESTS = [{
         'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029',
         'md5': '2b12292faeb0a7d930c778c7a5b4759b',
         'info_dict': {
-            'id': 'jjv85FdZ',
+            'id': '293f6d17-692a-44e3-9fd5-7b178f3a1029',
             'ext': 'mp4',
-            'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
-            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+            'title': '293f6d17-692a-44e3-9fd5-7b178f3a1029',
             'thumbnail': r're:^https?://.*\.jpg$',
-            'duration': 254,
-            'timestamp': 1519211149,
-            'upload_date': '20180221',
         },
     }, {
         'url': 'https://uvp-apapublisher.sf.apa.at/embed/2f94e9e6-d945-4db2-9548-f9a41ebf7b78',
@@ -46,9 +42,11 @@ class APAIE(InfoExtractor):
             webpage)]

     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        video_id, base_url = mobj.group('id', 'base_url')

-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(
+            '%s/player/%s' % (base_url, video_id), video_id)

         jwplatform_id = self._search_regex(
             r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,
@@ -59,16 +57,18 @@ class APAIE(InfoExtractor):
             'jwplatform:' + jwplatform_id, ie='JWPlatform',
             video_id=video_id)

-        sources = self._parse_json(
-            self._search_regex(
-                r'sources\s*=\s*(\[.+?\])\s*;', webpage, 'sources'),
-            video_id, transform_source=js_to_json)
+        def extract(field, name=None):
+            return self._search_regex(
+                r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % field,
+                webpage, name or field, default=None, group='value')
+
+        title = extract('title') or video_id
+        description = extract('description')
+        thumbnail = extract('poster', 'thumbnail')

         formats = []
-        for source in sources:
-            if not isinstance(source, dict):
-                continue
-            source_url = url_or_none(source.get('file'))
+        for format_id in ('hls', 'progressive'):
+            source_url = url_or_none(extract(format_id))
             if not source_url:
                 continue
             ext = determine_ext(source_url)
@@ -77,18 +77,19 @@ class APAIE(InfoExtractor):
                     source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                     m3u8_id='hls', fatal=False))
             else:
+                height = int_or_none(self._search_regex(
+                    r'(\d+)\.mp4', source_url, 'height', default=None))
                 formats.append({
                     'url': source_url,
+                    'format_id': format_id,
+                    'height': height,
                 })
         self._sort_formats(formats)

-        thumbnail = self._search_regex(
-            r'image\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
-            'thumbnail', fatal=False, group='url')
-
         return {
             'id': video_id,
-            'title': video_id,
+            'title': title,
+            'description': description,
             'thumbnail': thumbnail,
             'formats': formats,
         }
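A minimal sketch of the field-scraping idea behind the new extract() helper in the APA hunk above: pull quoted "key": "value" pairs straight out of the player HTML instead of parsing a sources JSON array. The regex is the one from the hunk; the sample HTML snippet is made up for illustration.

import re

def extract_field(webpage, field):
    # Matches patterns like "hls": "https://..." or 'poster': '...'
    m = re.search(
        r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % re.escape(field),
        webpage)
    return m.group('value') if m else None

sample = '{"title": "Demo clip", "hls": "https://example.com/master.m3u8"}'
assert extract_field(sample, 'hls') == 'https://example.com/master.m3u8'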
@@ -3,6 +3,7 @@ from __future__ import unicode_literals

 from .common import InfoExtractor
 from ..utils import (
+    get_element_by_id,
     int_or_none,
     merge_dicts,
     mimetype2ext,
@@ -39,23 +40,15 @@ class AparatIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id, fatal=False)

         if not webpage:
-            # Note: There is an easier-to-parse configuration at
-            # http://www.aparat.com/video/video/config/videohash/%video_id
-            # but the URL in there does not work
             webpage = self._download_webpage(
                 'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
                 video_id)

-        options = self._parse_json(
-            self._search_regex(
-                r'options\s*=\s*JSON\.parse\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1\s*\)',
-                webpage, 'options', group='value'),
-            video_id)
-
-        player = options['plugins']['sabaPlayerPlugin']
+        options = self._parse_json(self._search_regex(
+            r'options\s*=\s*({.+?})\s*;', webpage, 'options'), video_id)

         formats = []
-        for sources in player['multiSRC']:
+        for sources in (options.get('multiSRC') or []):
             for item in sources:
                 if not isinstance(item, dict):
                     continue
@@ -85,11 +78,12 @@ class AparatIE(InfoExtractor):
         info = self._search_json_ld(webpage, video_id, default={})

         if not info.get('title'):
-            info['title'] = player['title']
+            info['title'] = get_element_by_id('videoTitle', webpage) or \
+                self._html_search_meta(['og:title', 'twitter:title', 'DC.Title', 'title'], webpage, fatal=True)

         return merge_dicts(info, {
             'id': video_id,
             'thumbnail': url_or_none(options.get('poster')),
-            'duration': int_or_none(player.get('duration')),
+            'duration': int_or_none(options.get('duration')),
             'formats': formats,
         })
@@ -9,10 +9,10 @@ from ..utils import (


 class AppleConnectIE(InfoExtractor):
-    _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
-    _TEST = {
+    _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/(?:id)?sa\.(?P<id>[\w-]+)'
+    _TESTS = [{
         'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
-        'md5': 'e7c38568a01ea45402570e6029206723',
+        'md5': 'c1d41f72c8bcaf222e089434619316e4',
         'info_dict': {
             'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
             'ext': 'm4v',
@@ -22,7 +22,10 @@ class AppleConnectIE(InfoExtractor):
             'upload_date': '20150710',
             'timestamp': 1436545535,
         },
-    }
+    }, {
+        'url': 'https://itunes.apple.com/us/post/sa.0fe0229f-2457-11e5-9f40-1bb645f2d5d9',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -36,7 +39,7 @@ class AppleConnectIE(InfoExtractor):

         video_data = self._parse_json(video_json, video_id)
         timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp'))
-        like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count'))
+        like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count', default=None))

         return {
             'id': video_id,
youtube_dl/extractor/applepodcasts.py (new file, 93 lines)

# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    clean_podcast_url,
    get_element_by_class,
    int_or_none,
    parse_codecs,
    parse_iso8601,
    try_get,
)


class ApplePodcastsIE(InfoExtractor):
    _VALID_URL = r'https?://podcasts\.apple\.com/(?:[^/]+/)?podcast(?:/[^/]+){1,2}.*?\bi=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://podcasts.apple.com/us/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
        'md5': '41dc31cd650143e530d9423b6b5a344f',
        'info_dict': {
            'id': '1000482637777',
            'ext': 'mp3',
            'title': '207 - Whitney Webb Returns',
            'description': 'md5:75ef4316031df7b41ced4e7b987f79c6',
            'upload_date': '20200705',
            'timestamp': 1593932400,
            'duration': 6454,
            'series': 'The Tim Dillon Show',
            'thumbnail': 're:.+[.](png|jpe?g|webp)',
        }
    }, {
        'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
        'only_matching': True,
    }, {
        'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns?i=1000482637777',
        'only_matching': True,
    }, {
        'url': 'https://podcasts.apple.com/podcast/id1135137367?i=1000482637777',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        episode_id = self._match_id(url)
        webpage = self._download_webpage(url, episode_id)
        episode_data = {}
        ember_data = {}
        # new page type 2021-11
        amp_data = self._parse_json(self._search_regex(
            r'(?s)id="shoebox-media-api-cache-amp-podcasts"[^>]*>\s*({.+?})\s*<',
            webpage, 'AMP data', default='{}'), episode_id, fatal=False) or {}
        amp_data = try_get(amp_data,
                           lambda a: self._parse_json(
                               next(a[x] for x in iter(a) if episode_id in x),
                               episode_id),
                           dict) or {}
        amp_data = amp_data.get('d') or []
        episode_data = try_get(
            amp_data,
            lambda a: next(x for x in a
                           if x['type'] == 'podcast-episodes' and x['id'] == episode_id),
            dict)
        if not episode_data:
            # try pre 2021-11 page type: TODO: consider deleting if no longer used
            ember_data = self._parse_json(self._search_regex(
                r'(?s)id="shoebox-ember-data-store"[^>]*>\s*({.+?})\s*<',
                webpage, 'ember data'), episode_id) or {}
            ember_data = ember_data.get(episode_id) or ember_data
            episode_data = try_get(ember_data, lambda x: x['data'], dict)
        episode = episode_data['attributes']
        description = episode.get('description') or {}

        series = None
        for inc in (amp_data or ember_data.get('included') or []):
            if inc.get('type') == 'media/podcast':
                series = try_get(inc, lambda x: x['attributes']['name'])
        series = series or clean_html(get_element_by_class('podcast-header__identity', webpage))

        info = [{
            'id': episode_id,
            'title': episode['name'],
            'url': clean_podcast_url(episode['assetUrl']),
            'description': description.get('standard') or description.get('short'),
            'timestamp': parse_iso8601(episode.get('releaseDateTime')),
            'duration': int_or_none(episode.get('durationInMilliseconds'), 1000),
            'series': series,
            'thumbnail': self._og_search_thumbnail(webpage),
        }]
        self._sort_formats(info)
        info = info[0]
        codecs = parse_codecs(info.get('ext', 'mp3'))
        info.update(codecs)
        return info
@@ -2,15 +2,17 @@ from __future__ import unicode_literals

 from .common import InfoExtractor
 from ..utils import (
-    unified_strdate,
     clean_html,
+    extract_attributes,
+    unified_strdate,
+    unified_timestamp,
 )


 class ArchiveOrgIE(InfoExtractor):
     IE_NAME = 'archive.org'
     IE_DESC = 'archive.org videos'
-    _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^/?#]+)(?:[?].*)?$'
+    _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
         'md5': '8af1d4cf447933ed3c7f4871162602db',
@@ -19,8 +21,11 @@ class ArchiveOrgIE(InfoExtractor):
             'ext': 'ogg',
             'title': '1968 Demo - FJCC Conference Presentation Reel #1',
             'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
-            'upload_date': '19681210',
-            'uploader': 'SRI International'
+            'creator': 'SRI International',
+            'release_date': '19681210',
+            'uploader': 'SRI International',
+            'timestamp': 1268695290,
+            'upload_date': '20100315',
         }
     }, {
         'url': 'https://archive.org/details/Cops1922',
@@ -29,22 +34,43 @@ class ArchiveOrgIE(InfoExtractor):
             'id': 'Cops1922',
             'ext': 'mp4',
             'title': 'Buster Keaton\'s "Cops" (1922)',
-            'description': 'md5:89e7c77bf5d965dd5c0372cfb49470f6',
+            'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
+            'timestamp': 1387699629,
+            'upload_date': '20131222',
         }
     }, {
         'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
         'only_matching': True,
+    }, {
+        'url': 'https://archive.org/details/MSNBCW_20131125_040000_To_Catch_a_Predator/',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(
             'http://archive.org/embed/' + video_id, video_id)
-        jwplayer_playlist = self._parse_json(self._search_regex(
-            r"(?s)Play\('[^']+'\s*,\s*(\[.+\])\s*,\s*{.*?}\)",
-            webpage, 'jwplayer playlist'), video_id)
-        info = self._parse_jwplayer_data(
-            {'playlist': jwplayer_playlist}, video_id, base_url=url)
+
+        playlist = None
+        play8 = self._search_regex(
+            r'(<[^>]+\bclass=["\']js-play8-playlist[^>]+>)', webpage,
+            'playlist', default=None)
+        if play8:
+            attrs = extract_attributes(play8)
+            playlist = attrs.get('value')
+        if not playlist:
+            # Old jwplayer fallback
+            playlist = self._search_regex(
+                r"(?s)Play\('[^']+'\s*,\s*(\[.+\])\s*,\s*{.*?}\)",
+                webpage, 'jwplayer playlist', default='[]')
+        jwplayer_playlist = self._parse_json(playlist, video_id, fatal=False)
+        if jwplayer_playlist:
+            info = self._parse_jwplayer_data(
+                {'playlist': jwplayer_playlist}, video_id, base_url=url)
+        else:
+            # HTML5 media fallback
+            info = self._parse_html5_media_entries(url, webpage, video_id)[0]
+            info['id'] = video_id

         def get_optional(metadata, field):
             return metadata.get(field, [None])[0]
@@ -58,8 +84,12 @@ class ArchiveOrgIE(InfoExtractor):
             'description': clean_html(get_optional(metadata, 'description')),
         })
         if info.get('_type') != 'playlist':
+            creator = get_optional(metadata, 'creator')
             info.update({
-                'uploader': get_optional(metadata, 'creator'),
-                'upload_date': unified_strdate(get_optional(metadata, 'date')),
+                'creator': creator,
+                'release_date': unified_strdate(get_optional(metadata, 'date')),
+                'uploader': get_optional(metadata, 'publisher') or creator,
+                'timestamp': unified_timestamp(get_optional(metadata, 'publicdate')),
+                'language': get_optional(metadata, 'language'),
             })
         return info
youtube_dl/extractor/arcpublishing.py (new file, 174 lines)
@@ -0,0 +1,174 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    extract_attributes,
    int_or_none,
    parse_iso8601,
    try_get,
)


class ArcPublishingIE(InfoExtractor):
    _UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
    _VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX
    _TESTS = [{
        # https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
        'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
        'only_matching': True,
    }, {
        # https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/
        'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1',
        'only_matching': True,
    }, {
        # https://www.actionnewsjax.com/video/live-stream/
        'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a',
        'only_matching': True,
    }, {
        # https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/
        'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3',
        'only_matching': True,
    }, {
        # https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/
        'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe',
        'only_matching': True,
    }, {
        # https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/
        'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e',
        'only_matching': True,
    }, {
        # https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/
        'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143',
        'only_matching': True,
    }, {
        # https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/
        'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055',
        'only_matching': True,
    }, {
        # https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/
        'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d',
        'only_matching': True,
    }, {
        # https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/
        'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7',
        'only_matching': True,
    }, {
        # https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/
        'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b',
        'only_matching': True,
    }, {
        # https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html
        'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685',
        'only_matching': True,
    }]
    _POWA_DEFAULTS = [
        (['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'),
        ([
            'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo',
            'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom',
            'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek',
        ], 'video-api-cdn.%s.arcpublishing.com/api'),
    ]

    @staticmethod
    def _extract_urls(webpage):
        entries = []
        # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
        for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
            powa = extract_attributes(powa_el) or {}
            org = powa.get('data-org')
            uuid = powa.get('data-uuid')
            if org and uuid:
                entries.append('arcpublishing:%s:%s' % (org, uuid))
        return entries

    def _real_extract(self, url):
        org, uuid = re.match(self._VALID_URL, url).groups()
        for orgs, tmpl in self._POWA_DEFAULTS:
            if org in orgs:
                base_api_tmpl = tmpl
                break
        else:
            base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api'
        if org == 'wapo':
            org = 'washpost'
        video = self._download_json(
            'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org),
            uuid, query={'uuid': uuid})[0]
        title = video['headlines']['basic']
        is_live = video.get('status') == 'live'

        urls = []
        formats = []
        for s in video.get('streams', []):
            s_url = s.get('url')
            if not s_url or s_url in urls:
                continue
            urls.append(s_url)
            stream_type = s.get('stream_type')
            if stream_type == 'smil':
                smil_formats = self._extract_smil_formats(
                    s_url, uuid, fatal=False)
                for f in smil_formats:
                    if f['url'].endswith('/cfx/st'):
                        f['app'] = 'cfx/st'
                        if not f['play_path'].startswith('mp4:'):
                            f['play_path'] = 'mp4:' + f['play_path']
                        if isinstance(f['tbr'], float):
                            f['vbr'] = f['tbr'] * 1000
                            del f['tbr']
                            f['format_id'] = 'rtmp-%d' % f['vbr']
                formats.extend(smil_formats)
            elif stream_type in ('ts', 'hls'):
                m3u8_formats = self._extract_m3u8_formats(
                    s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
                    m3u8_id='hls', fatal=False)
                if all([f.get('acodec') == 'none' for f in m3u8_formats]):
                    continue
                for f in m3u8_formats:
                    if f.get('acodec') == 'none':
                        f['preference'] = -40
                    elif f.get('vcodec') == 'none':
                        f['preference'] = -50
                    height = f.get('height')
                    if not height:
                        continue
                    vbr = self._search_regex(
                        r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
                    if vbr:
                        f['vbr'] = int(vbr)
                formats.extend(m3u8_formats)
            else:
                vbr = int_or_none(s.get('bitrate'))
                formats.append({
                    'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type,
                    'vbr': vbr,
                    'width': int_or_none(s.get('width')),
                    'height': int_or_none(s.get('height')),
                    'filesize': int_or_none(s.get('filesize')),
                    'url': s_url,
                    'preference': -1,
                })
        self._sort_formats(
            formats, ('preference', 'width', 'height', 'vbr', 'filesize', 'tbr', 'ext', 'format_id'))

        subtitles = {}
        for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []):
            subtitle_url = subtitle.get('url')
            if subtitle_url:
                subtitles.setdefault('en', []).append({'url': subtitle_url})

        return {
            'id': uuid,
            'title': self._live_title(title) if is_live else title,
            'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
            'description': try_get(video, lambda x: x['subheadlines']['basic']),
            'formats': formats,
            'duration': int_or_none(video.get('duration'), 100),
            'timestamp': parse_iso8601(video.get('created_date')),
            'subtitles': subtitles,
            'is_live': is_live,
        }
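The extractor above only answers to the internal arcpublishing:<org>:<uuid> scheme that other extractors feed it via _extract_urls(). A minimal sketch of exercising it directly through the public YoutubeDL API (assumes a youtube-dl build that already ships ArcPublishingIE; the UUID is taken from the first test case above):

    from __future__ import unicode_literals

    import youtube_dl

    # Resolve one of the test UUIDs above without downloading anything.
    with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
        info = ydl.extract_info(
            'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab', download=False)
        print(info.get('title'), info.get('duration'))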
@@ -187,13 +187,13 @@ class ARDMediathekIE(ARDMediathekBaseIE):
         if doc.tag == 'rss':
             return GenericIE()._extract_rss(url, video_id, doc)
 
-        title = self._html_search_regex(
+        title = self._og_search_title(webpage, default=None) or self._html_search_regex(
             [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
              r'<meta name="dcterms\.title" content="(.*?)"/>',
              r'<h4 class="headline">(.*?)</h4>',
              r'<title[^>]*>(.*?)</title>'],
             webpage, 'title')
-        description = self._html_search_meta(
+        description = self._og_search_description(webpage, default=None) or self._html_search_meta(
             'dcterms.abstract', webpage, 'description', default=None)
         if description is None:
             description = self._html_search_meta(
@@ -249,31 +249,40 @@ class ARDMediathekIE(ARDMediathekBaseIE):
 
 
 class ARDIE(InfoExtractor):
-    _VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos(?:extern)?/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
+    _VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html'
     _TESTS = [{
-        # available till 14.02.2019
-        'url': 'http://www.daserste.de/information/talk/maischberger/videos/das-groko-drama-zerlegen-sich-die-volksparteien-video-102.html',
-        'md5': '8e4ec85f31be7c7fc08a26cdbc5a1f49',
+        # available till 7.01.2022
+        'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-die-woche-video100.html',
+        'md5': '867d8aa39eeaf6d76407c5ad1bb0d4c1',
         'info_dict': {
-            'display_id': 'das-groko-drama-zerlegen-sich-die-volksparteien-video',
-            'id': '102',
+            'id': 'maischberger-die-woche-video100',
+            'display_id': 'maischberger-die-woche-video100',
             'ext': 'mp4',
-            'duration': 4435.0,
-            'title': 'Das GroKo-Drama: Zerlegen sich die Volksparteien?',
-            'upload_date': '20180214',
+            'duration': 3687.0,
+            'title': 'maischberger. die woche vom 7. Januar 2021',
+            'upload_date': '20210107',
            'thumbnail': r're:^https?://.*\.jpg$',
         },
     }, {
-        'url': 'https://www.daserste.de/information/reportage-dokumentation/erlebnis-erde/videosextern/woelfe-und-herdenschutzhunde-ungleiche-brueder-102.html',
+        'url': 'https://www.daserste.de/information/politik-weltgeschehen/morgenmagazin/videosextern/dominik-kahun-aus-der-nhl-direkt-zur-weltmeisterschaft-100.html',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.daserste.de/information/nachrichten-wetter/tagesthemen/videosextern/tagesthemen-17736.html',
         'only_matching': True,
     }, {
         'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
         'only_matching': True,
+    }, {
+        'url': 'https://www.daserste.de/unterhaltung/serie/in-aller-freundschaft-die-jungen-aerzte/Drehpause-100.html',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.daserste.de/unterhaltung/film/filmmittwoch-im-ersten/videos/making-ofwendezeit-video-100.html',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('display_id')
+        display_id = mobj.group('id')
 
         player_url = mobj.group('mainurl') + '~playerXml.xml'
         doc = self._download_xml(player_url, display_id)
@@ -284,26 +293,63 @@ class ARDIE(InfoExtractor):
 
         formats = []
         for a in video_node.findall('.//asset'):
+            file_name = xpath_text(a, './fileName', default=None)
+            if not file_name:
+                continue
+            format_type = a.attrib.get('type')
+            format_url = url_or_none(file_name)
+            if format_url:
+                ext = determine_ext(file_name)
+                if ext == 'm3u8':
+                    formats.extend(self._extract_m3u8_formats(
+                        format_url, display_id, 'mp4', entry_protocol='m3u8_native',
+                        m3u8_id=format_type or 'hls', fatal=False))
+                    continue
+                elif ext == 'f4m':
+                    formats.extend(self._extract_f4m_formats(
+                        update_url_query(format_url, {'hdcore': '3.7.0'}),
+                        display_id, f4m_id=format_type or 'hds', fatal=False))
+                    continue
             f = {
-                'format_id': a.attrib['type'],
-                'width': int_or_none(a.find('./frameWidth').text),
-                'height': int_or_none(a.find('./frameHeight').text),
-                'vbr': int_or_none(a.find('./bitrateVideo').text),
-                'abr': int_or_none(a.find('./bitrateAudio').text),
-                'vcodec': a.find('./codecVideo').text,
-                'tbr': int_or_none(a.find('./totalBitrate').text),
+                'format_id': format_type,
+                'width': int_or_none(xpath_text(a, './frameWidth')),
+                'height': int_or_none(xpath_text(a, './frameHeight')),
+                'vbr': int_or_none(xpath_text(a, './bitrateVideo')),
+                'abr': int_or_none(xpath_text(a, './bitrateAudio')),
+                'vcodec': xpath_text(a, './codecVideo'),
+                'tbr': int_or_none(xpath_text(a, './totalBitrate')),
             }
-            if a.find('./serverPrefix').text:
-                f['url'] = a.find('./serverPrefix').text
-                f['playpath'] = a.find('./fileName').text
+            server_prefix = xpath_text(a, './serverPrefix', default=None)
+            if server_prefix:
+                f.update({
+                    'url': server_prefix,
+                    'playpath': file_name,
+                })
             else:
-                f['url'] = a.find('./fileName').text
+                if not format_url:
+                    continue
+                f['url'] = format_url
             formats.append(f)
         self._sort_formats(formats)
 
+        _SUB_FORMATS = (
+            ('./dataTimedText', 'ttml'),
+            ('./dataTimedTextNoOffset', 'ttml'),
+            ('./dataTimedTextVtt', 'vtt'),
+        )
+
+        subtitles = {}
+        for subsel, subext in _SUB_FORMATS:
+            for node in video_node.findall(subsel):
+                subtitles.setdefault('de', []).append({
+                    'url': node.attrib['url'],
+                    'ext': subext,
+                })
 
         return {
-            'id': mobj.group('id'),
+            'id': xpath_text(video_node, './videoId', default=display_id),
             'formats': formats,
+            'subtitles': subtitles,
             'display_id': display_id,
             'title': video_node.find('./title').text,
             'duration': parse_duration(video_node.find('./duration').text),
@@ -313,19 +359,19 @@ class ARDIE(InfoExtractor):
 
 
 class ARDBetaMediathekIE(ARDMediathekBaseIE):
-    _VALID_URL = r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/(?:player|live|video)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)'
+    _VALID_URL = r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?:[^/]+/)?(?:player|live|video)/(?:[^/]+/)*(?P<id>Y3JpZDovL[a-zA-Z0-9]+)'
     _TESTS = [{
-        'url': 'https://ardmediathek.de/ard/video/die-robuste-roswita/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
-        'md5': 'dfdc87d2e7e09d073d5a80770a9ce88f',
+        'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',
+        'md5': 'a1dc75a39c61601b980648f7c9f9f71d',
         'info_dict': {
             'display_id': 'die-robuste-roswita',
-            'id': '70153354',
+            'id': '78566716',
             'title': 'Die robuste Roswita',
-            'description': r're:^Der Mord.*trüber ist als die Ilm.',
+            'description': r're:^Der Mord.*totgeglaubte Ehefrau Roswita',
             'duration': 5316,
-            'thumbnail': 'https://img.ardmediathek.de/standard/00/70/15/33/90/-1852531467/16x9/960?mandant=ard',
-            'timestamp': 1577047500,
-            'upload_date': '20191222',
+            'thumbnail': 'https://img.ardmediathek.de/standard/00/78/56/67/84/575672121/16x9/960?mandant=ard',
+            'timestamp': 1596658200,
+            'upload_date': '20200805',
             'ext': 'mp4',
         },
     }, {
@@ -343,22 +389,22 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
     }, {
         'url': 'https://www.ardmediathek.de/swr/live/Y3JpZDovL3N3ci5kZS8xMzQ4MTA0Mg',
         'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3dkci5kZS9CZWl0cmFnLWQ2NDJjYWEzLTMwZWYtNGI4NS1iMTI2LTU1N2UxYTcxOGIzOQ/tatort-duo-koeln-leipzig-ihr-kinderlein-kommet',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
-        display_id = mobj.group('display_id')
-        if display_id:
-            display_id = display_id.rstrip('/')
-        if not display_id:
-            display_id = video_id
+        video_id = self._match_id(url)
 
         player_page = self._download_json(
             'https://api.ardmediathek.de/public-gateway',
-            display_id, data=json.dumps({
+            video_id, data=json.dumps({
                 'query': '''{
-  playerPage(client:"%s", clipId: "%s") {
+  playerPage(client: "ard", clipId: "%s") {
     blockedByFsk
     broadcastedOn
     maturityContentRating
@@ -388,7 +434,7 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
       }
    }
  }
-}''' % (mobj.group('client'), video_id),
+}''' % video_id,
        }).encode(), headers={
            'Content-Type': 'application/json'
        })['data']['playerPage']
@@ -413,7 +459,6 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
             r'\(FSK\s*(\d+)\)\s*$', description, 'age limit', default=None))
         info.update({
             'age_limit': age_limit,
-            'display_id': display_id,
             'title': title,
             'description': description,
             'timestamp': unified_timestamp(player_page.get('broadcastedOn')),
@@ -6,13 +6,11 @@ import re
 from .common import InfoExtractor
 from ..compat import compat_urlparse
 from ..utils import (
-    determine_ext,
     ExtractorError,
     float_or_none,
     int_or_none,
-    mimetype2ext,
     parse_iso8601,
-    strip_jsonp,
+    try_get,
 )
 
 
@@ -20,22 +18,27 @@ class ArkenaIE(InfoExtractor):
     _VALID_URL = r'''(?x)
                         https?://
                             (?:
-                                video\.arkena\.com/play2/embed/player\?|
+                                video\.(?:arkena|qbrick)\.com/play2/embed/player\?|
                                 play\.arkena\.com/(?:config|embed)/avp/v\d/player/media/(?P<id>[^/]+)/[^/]+/(?P<account_id>\d+)
                             )
                         '''
     _TESTS = [{
-        'url': 'https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411',
-        'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
+        'url': 'https://video.qbrick.com/play2/embed/player?accountId=1034090&mediaId=d8ab4607-00090107-aab86310',
+        'md5': '97f117754e5f3c020f5f26da4a44ebaf',
         'info_dict': {
-            'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
+            'id': 'd8ab4607-00090107-aab86310',
             'ext': 'mp4',
-            'title': 'Big Buck Bunny',
-            'description': 'Royalty free test video',
-            'timestamp': 1432816365,
-            'upload_date': '20150528',
-            'is_live': False,
+            'title': 'EM_HT20_117_roslund_v2.mp4',
+            'timestamp': 1608285912,
+            'upload_date': '20201218',
+            'duration': 1429.162667,
+            'subtitles': {
+                'sv': 'count:3',
+            },
         },
+    }, {
+        'url': 'https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411',
+        'only_matching': True,
     }, {
         'url': 'https://play.arkena.com/config/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411/?callbackMethod=jQuery1111023664739129262213_1469227693893',
         'only_matching': True,
@@ -72,62 +75,89 @@ class ArkenaIE(InfoExtractor):
         if not video_id or not account_id:
             raise ExtractorError('Invalid URL', expected=True)
 
-        playlist = self._download_json(
-            'https://play.arkena.com/config/avp/v2/player/media/%s/0/%s/?callbackMethod=_'
-            % (video_id, account_id),
-            video_id, transform_source=strip_jsonp)['Playlist'][0]
-
-        media_info = playlist['MediaInfo']
-        title = media_info['Title']
-        media_files = playlist['MediaFiles']
-
-        is_live = False
-        formats = []
-        for kind_case, kind_formats in media_files.items():
-            kind = kind_case.lower()
-            for f in kind_formats:
-                f_url = f.get('Url')
-                if not f_url:
-                    continue
-                is_live = f.get('Live') == 'true'
-                exts = (mimetype2ext(f.get('Type')), determine_ext(f_url, None))
-                if kind == 'm3u8' or 'm3u8' in exts:
-                    formats.extend(self._extract_m3u8_formats(
-                        f_url, video_id, 'mp4', 'm3u8_native',
-                        m3u8_id=kind, fatal=False, live=is_live))
-                elif kind == 'flash' or 'f4m' in exts:
-                    formats.extend(self._extract_f4m_formats(
-                        f_url, video_id, f4m_id=kind, fatal=False))
-                elif kind == 'dash' or 'mpd' in exts:
-                    formats.extend(self._extract_mpd_formats(
-                        f_url, video_id, mpd_id=kind, fatal=False))
-                elif kind == 'silverlight':
-                    # TODO: process when ism is supported (see
-                    # https://github.com/ytdl-org/youtube-dl/issues/8118)
-                    continue
-                else:
-                    tbr = float_or_none(f.get('Bitrate'), 1000)
-                    formats.append({
-                        'url': f_url,
-                        'format_id': '%s-%d' % (kind, tbr) if tbr else kind,
-                        'tbr': tbr,
-                    })
+        media = self._download_json(
+            'https://video.qbrick.com/api/v1/public/accounts/%s/medias/%s' % (account_id, video_id),
+            video_id, query={
+                # https://video.qbrick.com/docs/api/examples/library-api.html
+                'fields': 'asset/resources/*/renditions/*(height,id,language,links/*(href,mimeType),type,size,videos/*(audios/*(codec,sampleRate),bitrate,codec,duration,height,width),width),created,metadata/*(title,description),tags',
+            })
+        metadata = media.get('metadata') or {}
+        title = metadata['title']
+
+        duration = None
+        formats = []
+        thumbnails = []
+        subtitles = {}
+        for resource in media['asset']['resources']:
+            for rendition in (resource.get('renditions') or []):
+                rendition_type = rendition.get('type')
+                for i, link in enumerate(rendition.get('links') or []):
+                    href = link.get('href')
+                    if not href:
+                        continue
+                    if rendition_type == 'image':
+                        thumbnails.append({
+                            'filesize': int_or_none(rendition.get('size')),
+                            'height': int_or_none(rendition.get('height')),
+                            'id': rendition.get('id'),
+                            'url': href,
+                            'width': int_or_none(rendition.get('width')),
+                        })
+                    elif rendition_type == 'subtitle':
+                        subtitles.setdefault(rendition.get('language') or 'en', []).append({
+                            'url': href,
+                        })
+                    elif rendition_type == 'video':
+                        f = {
+                            'filesize': int_or_none(rendition.get('size')),
+                            'format_id': rendition.get('id'),
+                            'url': href,
+                        }
+                        video = try_get(rendition, lambda x: x['videos'][i], dict)
+                        if video:
+                            if not duration:
+                                duration = float_or_none(video.get('duration'))
+                            f.update({
+                                'height': int_or_none(video.get('height')),
+                                'tbr': int_or_none(video.get('bitrate'), 1000),
+                                'vcodec': video.get('codec'),
+                                'width': int_or_none(video.get('width')),
+                            })
+                            audio = try_get(video, lambda x: x['audios'][0], dict)
+                            if audio:
+                                f.update({
+                                    'acodec': audio.get('codec'),
+                                    'asr': int_or_none(audio.get('sampleRate')),
+                                })
+                        formats.append(f)
+                    elif rendition_type == 'index':
+                        mime_type = link.get('mimeType')
+                        if mime_type == 'application/smil+xml':
+                            formats.extend(self._extract_smil_formats(
+                                href, video_id, fatal=False))
+                        elif mime_type == 'application/x-mpegURL':
+                            formats.extend(self._extract_m3u8_formats(
+                                href, video_id, 'mp4', 'm3u8_native',
+                                m3u8_id='hls', fatal=False))
+                        elif mime_type == 'application/hds+xml':
+                            formats.extend(self._extract_f4m_formats(
+                                href, video_id, f4m_id='hds', fatal=False))
+                        elif mime_type == 'application/dash+xml':
+                            formats.extend(self._extract_f4m_formats(
+                                href, video_id, f4m_id='hds', fatal=False))
+                        elif mime_type == 'application/vnd.ms-sstr+xml':
+                            formats.extend(self._extract_ism_formats(
+                                href, video_id, ism_id='mss', fatal=False))
         self._sort_formats(formats)
 
-        description = media_info.get('Description')
-        video_id = media_info.get('VideoId') or video_id
-        timestamp = parse_iso8601(media_info.get('PublishDate'))
-        thumbnails = [{
-            'url': thumbnail['Url'],
-            'width': int_or_none(thumbnail.get('Size')),
-        } for thumbnail in (media_info.get('Poster') or []) if thumbnail.get('Url')]
-
         return {
             'id': video_id,
             'title': title,
-            'description': description,
-            'timestamp': timestamp,
-            'is_live': is_live,
+            'description': metadata.get('description'),
+            'timestamp': parse_iso8601(media.get('created')),
             'thumbnails': thumbnails,
+            'subtitles': subtitles,
+            'duration': duration,
+            'tags': media.get('tags'),
             'formats': formats,
         }
youtube_dl/extractor/arnes.py (new file, 101 lines)
@@ -0,0 +1,101 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    float_or_none,
    int_or_none,
    parse_iso8601,
    remove_start,
)


class ArnesIE(InfoExtractor):
    IE_NAME = 'video.arnes.si'
    IE_DESC = 'Arnes Video'
    _VALID_URL = r'https?://video\.arnes\.si/(?:[a-z]{2}/)?(?:watch|embed|api/(?:asset|public/video))/(?P<id>[0-9a-zA-Z]{12})'
    _TESTS = [{
        'url': 'https://video.arnes.si/watch/a1qrWTOQfVoU?t=10',
        'md5': '4d0f4d0a03571b33e1efac25fd4a065d',
        'info_dict': {
            'id': 'a1qrWTOQfVoU',
            'ext': 'mp4',
            'title': 'Linearna neodvisnost, definicija',
            'description': 'Linearna neodvisnost, definicija',
            'license': 'PRIVATE',
            'creator': 'Polona Oblak',
            'timestamp': 1585063725,
            'upload_date': '20200324',
            'channel': 'Polona Oblak',
            'channel_id': 'q6pc04hw24cj',
            'channel_url': 'https://video.arnes.si/?channel=q6pc04hw24cj',
            'duration': 596.75,
            'view_count': int,
            'tags': ['linearna_algebra'],
            'start_time': 10,
        }
    }, {
        'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/embed/s1YjnV7hadlC',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/en/watch/s1YjnV7hadlC',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/embed/s1YjnV7hadlC?t=123&hideRelated=1',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/api/public/video/s1YjnV7hadlC',
        'only_matching': True,
    }]
    _BASE_URL = 'https://video.arnes.si'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            self._BASE_URL + '/api/public/video/' + video_id, video_id)['data']
        title = video['title']

        formats = []
        for media in (video.get('media') or []):
            media_url = media.get('url')
            if not media_url:
                continue
            formats.append({
                'url': self._BASE_URL + media_url,
                'format_id': remove_start(media.get('format'), 'FORMAT_'),
                'format_note': media.get('formatTranslation'),
                'width': int_or_none(media.get('width')),
                'height': int_or_none(media.get('height')),
            })
        self._sort_formats(formats)

        channel = video.get('channel') or {}
        channel_id = channel.get('url')
        thumbnail = video.get('thumbnailUrl')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': self._BASE_URL + thumbnail,
            'description': video.get('description'),
            'license': video.get('license'),
            'creator': video.get('author'),
            'timestamp': parse_iso8601(video.get('creationTime')),
            'channel': channel.get('name'),
            'channel_id': channel_id,
            'channel_url': self._BASE_URL + '/?channel=' + channel_id if channel_id else None,
            'duration': float_or_none(video.get('duration'), 1000),
            'view_count': int_or_none(video.get('views')),
            'tags': video.get('hashtags'),
            'start_time': int_or_none(compat_parse_qs(
                compat_urllib_parse_urlparse(url).query).get('t', [None])[0]),
        }
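The start_time field above is read straight from the t query parameter of the watch URL; the same parsing, sketched outside the youtube-dl compat layer with plain standard-library names:

    try:
        from urllib.parse import parse_qs, urlparse  # Python 3
    except ImportError:
        from urlparse import parse_qs, urlparse  # Python 2 fallback

    def start_time_from_url(url):
        # Mirrors the compat_parse_qs/compat_urllib_parse_urlparse call above.
        t = parse_qs(urlparse(url).query).get('t', [None])[0]
        return int(t) if t and t.isdigit() else None

    print(start_time_from_url('https://video.arnes.si/watch/a1qrWTOQfVoU?t=10'))  # 10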
@@ -12,6 +12,7 @@ from ..utils import (
     ExtractorError,
     int_or_none,
     qualities,
+    strip_or_none,
     try_get,
     unified_strdate,
     url_or_none,
@@ -252,3 +253,49 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
     title = collection.get('title')
     description = collection.get('shortDescription') or collection.get('teaserText')
     return self.playlist_result(entries, playlist_id, title, description)
+
+
+class ArteTVCategoryIE(ArteTVBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>[\w-]+(?:/[\w-]+)*)/?\s*$' % ArteTVBaseIE._ARTE_LANGUAGES
+    _TESTS = [{
+        'url': 'https://www.arte.tv/en/videos/politics-and-society/',
+        'info_dict': {
+            'id': 'politics-and-society',
+            'title': 'Politics and society',
+            'description': 'Investigative documentary series, geopolitical analysis, and international commentary',
+        },
+        'playlist_mincount': 13,
+    },
+    ]
+
+    @classmethod
+    def suitable(cls, url):
+        return (
+            not any(ie.suitable(url) for ie in (ArteTVIE, ArteTVPlaylistIE, ))
+            and super(ArteTVCategoryIE, cls).suitable(url))
+
+    def _real_extract(self, url):
+        lang, playlist_id = re.match(self._VALID_URL, url).groups()
+        webpage = self._download_webpage(url, playlist_id)
+
+        items = []
+        for video in re.finditer(
+                r'<a\b[^>]*?href\s*=\s*(?P<q>"|\'|\b)(?P<url>https?://www\.arte\.tv/%s/videos/[\w/-]+)(?P=q)' % lang,
+                webpage):
+            video = video.group('url')
+            if video == url:
+                continue
+            if any(ie.suitable(video) for ie in (ArteTVIE, ArteTVPlaylistIE, )):
+                items.append(video)
+
+        if items:
+            title = (self._og_search_title(webpage, default=None)
+                     or self._html_search_regex(r'<title\b[^>]*>([^<]+)</title>', default=None))
+            title = strip_or_none(title.rsplit('|', 1)[0]) or self._generic_title(url)
+
+            result = self.playlist_from_matches(items, playlist_id=playlist_id, playlist_title=title)
+            if result:
+                description = self._og_search_description(webpage, default=None)
+                if description:
+                    result['description'] = description
+                return result
@@ -1,27 +1,91 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import functools
 import re
 
 from .common import InfoExtractor
 from .kaltura import KalturaIE
-from ..utils import extract_attributes
+from ..utils import (
+    extract_attributes,
+    int_or_none,
+    OnDemandPagedList,
+    parse_age_limit,
+    strip_or_none,
+    try_get,
+)
 
 
-class AsianCrushIE(InfoExtractor):
-    _VALID_URL_BASE = r'https?://(?:www\.)?(?P<host>(?:(?:asiancrush|yuyutv|midnightpulp)\.com|cocoro\.tv))'
-    _VALID_URL = r'%s/video/(?:[^/]+/)?0+(?P<id>\d+)v\b' % _VALID_URL_BASE
+class AsianCrushBaseIE(InfoExtractor):
+    _VALID_URL_BASE = r'https?://(?:www\.)?(?P<host>(?:(?:asiancrush|yuyutv|midnightpulp)\.com|(?:cocoro|retrocrush)\.tv))'
+    _KALTURA_KEYS = [
+        'video_url', 'progressive_url', 'download_url', 'thumbnail_url',
+        'widescreen_thumbnail_url', 'screencap_widescreen',
+    ]
+    _API_SUFFIX = {'retrocrush.tv': '-ott'}
+
+    def _call_api(self, host, endpoint, video_id, query, resource):
+        return self._download_json(
+            'https://api%s.%s/%s' % (self._API_SUFFIX.get(host, ''), host, endpoint), video_id,
+            'Downloading %s JSON metadata' % resource, query=query,
+            headers=self.geo_verification_headers())['objects']
+
+    def _download_object_data(self, host, object_id, resource):
+        return self._call_api(
+            host, 'search', object_id, {'id': object_id}, resource)[0]
+
+    def _get_object_description(self, obj):
+        return strip_or_none(obj.get('long_description') or obj.get('short_description'))
+
+    def _parse_video_data(self, video):
+        title = video['name']
+
+        entry_id, partner_id = [None] * 2
+        for k in self._KALTURA_KEYS:
+            k_url = video.get(k)
+            if k_url:
+                mobj = re.search(r'/p/(\d+)/.+?/entryId/([^/]+)/', k_url)
+                if mobj:
+                    partner_id, entry_id = mobj.groups()
+                    break
+
+        meta_categories = try_get(video, lambda x: x['meta']['categories'], list) or []
+        categories = list(filter(None, [c.get('name') for c in meta_categories]))
+
+        show_info = video.get('show_info') or {}
+
+        return {
+            '_type': 'url_transparent',
+            'url': 'kaltura:%s:%s' % (partner_id, entry_id),
+            'ie_key': KalturaIE.ie_key(),
+            'id': entry_id,
+            'title': title,
+            'description': self._get_object_description(video),
+            'age_limit': parse_age_limit(video.get('mpaa_rating') or video.get('tv_rating')),
+            'categories': categories,
+            'series': show_info.get('show_name'),
+            'season_number': int_or_none(show_info.get('season_num')),
+            'season_id': show_info.get('season_id'),
+            'episode_number': int_or_none(show_info.get('episode_num')),
+        }
+
+
+class AsianCrushIE(AsianCrushBaseIE):
+    _VALID_URL = r'%s/video/(?:[^/]+/)?0+(?P<id>\d+)v\b' % AsianCrushBaseIE._VALID_URL_BASE
     _TESTS = [{
-        'url': 'https://www.asiancrush.com/video/012869v/women-who-flirt/',
+        'url': 'https://www.asiancrush.com/video/004289v/women-who-flirt',
         'md5': 'c3b740e48d0ba002a42c0b72857beae6',
         'info_dict': {
             'id': '1_y4tmjm5r',
             'ext': 'mp4',
             'title': 'Women Who Flirt',
-            'description': 'md5:7e986615808bcfb11756eb503a751487',
+            'description': 'md5:b65c7e0ae03a85585476a62a186f924c',
             'timestamp': 1496936429,
             'upload_date': '20170608',
             'uploader_id': 'craig@crifkin.com',
+            'age_limit': 13,
+            'categories': 'count:5',
+            'duration': 5812,
         },
     }, {
         'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/',
@@ -41,67 +105,35 @@ class AsianCrushIE(InfoExtractor):
     }, {
         'url': 'https://www.cocoro.tv/video/the-wonderful-wizard-of-oz/008878v-the-wonderful-wizard-of-oz-ep01/',
         'only_matching': True,
+    }, {
+        'url': 'https://www.retrocrush.tv/video/true-tears/012328v-i...gave-away-my-tears',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        host = mobj.group('host')
-        video_id = mobj.group('id')
-
-        webpage = self._download_webpage(url, video_id)
-
-        entry_id, partner_id, title = [None] * 3
-
-        vars = self._parse_json(
-            self._search_regex(
-                r'iEmbedVars\s*=\s*({.+?})', webpage, 'embed vars',
-                default='{}'), video_id, fatal=False)
-        if vars:
-            entry_id = vars.get('entry_id')
-            partner_id = vars.get('partner_id')
-            title = vars.get('vid_label')
-
-        if not entry_id:
-            entry_id = self._search_regex(
-                r'\bentry_id["\']\s*:\s*["\'](\d+)', webpage, 'entry id')
-
-        player = self._download_webpage(
-            'https://api.%s/embeddedVideoPlayer' % host, video_id,
-            query={'id': entry_id})
-
-        kaltura_id = self._search_regex(
-            r'entry_id["\']\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1', player,
-            'kaltura id', group='id')
-
-        if not partner_id:
-            partner_id = self._search_regex(
-                r'/p(?:artner_id)?/(\d+)', player, 'partner id',
-                default='513551')
-
-        description = self._html_search_regex(
-            r'(?s)<div[^>]+\bclass=["\']description["\'][^>]*>(.+?)</div>',
-            webpage, 'description', fatal=False)
-
-        return {
-            '_type': 'url_transparent',
-            'url': 'kaltura:%s:%s' % (partner_id, kaltura_id),
-            'ie_key': KalturaIE.ie_key(),
-            'id': video_id,
-            'title': title,
-            'description': description,
-        }
-
-
-class AsianCrushPlaylistIE(InfoExtractor):
-    _VALID_URL = r'%s/series/0+(?P<id>\d+)s\b' % AsianCrushIE._VALID_URL_BASE
+        host, video_id = re.match(self._VALID_URL, url).groups()
+
+        if host == 'cocoro.tv':
+            webpage = self._download_webpage(url, video_id)
+            embed_vars = self._parse_json(self._search_regex(
+                r'iEmbedVars\s*=\s*({.+?})', webpage, 'embed vars',
+                default='{}'), video_id, fatal=False) or {}
+            video_id = embed_vars.get('entry_id') or video_id
+
+        video = self._download_object_data(host, video_id, 'video')
+        return self._parse_video_data(video)
+
+
+class AsianCrushPlaylistIE(AsianCrushBaseIE):
+    _VALID_URL = r'%s/series/0+(?P<id>\d+)s\b' % AsianCrushBaseIE._VALID_URL_BASE
     _TESTS = [{
-        'url': 'https://www.asiancrush.com/series/012481s/scholar-walks-night/',
+        'url': 'https://www.asiancrush.com/series/006447s/fruity-samurai',
         'info_dict': {
-            'id': '12481',
-            'title': 'Scholar Who Walks the Night',
-            'description': 'md5:7addd7c5132a09fd4741152d96cce886',
+            'id': '6447',
+            'title': 'Fruity Samurai',
+            'description': 'md5:7535174487e4a202d3872a7fc8f2f154',
         },
-        'playlist_count': 20,
+        'playlist_count': 13,
     }, {
         'url': 'https://www.yuyutv.com/series/013920s/peep-show/',
         'only_matching': True,
@@ -111,35 +143,58 @@ class AsianCrushPlaylistIE(InfoExtractor):
     }, {
         'url': 'https://www.cocoro.tv/series/008549s/the-wonderful-wizard-of-oz/',
         'only_matching': True,
+    }, {
+        'url': 'https://www.retrocrush.tv/series/012355s/true-tears',
+        'only_matching': True,
     }]
+    _PAGE_SIZE = 1000000000
+
+    def _fetch_page(self, domain, parent_id, page):
+        videos = self._call_api(
+            domain, 'getreferencedobjects', parent_id, {
+                'max': self._PAGE_SIZE,
+                'object_type': 'video',
+                'parent_id': parent_id,
+                'start': page * self._PAGE_SIZE,
+            }, 'page %d' % (page + 1))
+        for video in videos:
+            yield self._parse_video_data(video)
 
     def _real_extract(self, url):
-        playlist_id = self._match_id(url)
+        host, playlist_id = re.match(self._VALID_URL, url).groups()
 
-        webpage = self._download_webpage(url, playlist_id)
+        if host == 'cocoro.tv':
+            webpage = self._download_webpage(url, playlist_id)
 
-        entries = []
+            entries = []
 
-        for mobj in re.finditer(
-                r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL,
-                webpage):
-            attrs = extract_attributes(mobj.group(0))
-            if attrs.get('class') == 'clearfix':
-                entries.append(self.url_result(
-                    mobj.group('url'), ie=AsianCrushIE.ie_key()))
-
-        title = self._html_search_regex(
-            r'(?s)<h1\b[^>]\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage,
-            'title', default=None) or self._og_search_title(
-            webpage, default=None) or self._html_search_meta(
-            'twitter:title', webpage, 'title',
-            default=None) or self._search_regex(
-            r'<title>([^<]+)</title>', webpage, 'title', fatal=False)
-        if title:
-            title = re.sub(r'\s*\|\s*.+?$', '', title)
-
-        description = self._og_search_description(
-            webpage, default=None) or self._html_search_meta(
-            'twitter:description', webpage, 'description', fatal=False)
+            for mobj in re.finditer(
+                    r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL,
+                    webpage):
+                attrs = extract_attributes(mobj.group(0))
+                if attrs.get('class') == 'clearfix':
+                    entries.append(self.url_result(
+                        mobj.group('url'), ie=AsianCrushIE.ie_key()))
+
+            title = self._html_search_regex(
+                r'(?s)<h1\b[^>]\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage,
+                'title', default=None) or self._og_search_title(
+                webpage, default=None) or self._html_search_meta(
+                'twitter:title', webpage, 'title',
+                default=None) or self._search_regex(
+                r'<title>([^<]+)</title>', webpage, 'title', fatal=False)
+            if title:
+                title = re.sub(r'\s*\|\s*.+?$', '', title)
+
+            description = self._og_search_description(
+                webpage, default=None) or self._html_search_meta(
+                'twitter:description', webpage, 'description', fatal=False)
+        else:
+            show = self._download_object_data(host, playlist_id, 'show')
+            title = show.get('name')
+            description = self._get_object_description(show)
+            entries = OnDemandPagedList(
+                functools.partial(self._fetch_page, host, playlist_id),
+                self._PAGE_SIZE)
 
         return self.playlist_result(entries, playlist_id, title, description)
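The playlist rewrite above hands OnDemandPagedList a page-fetching callback so pages are only requested when a slice of the playlist is actually needed (the same mechanism --playlist-start/--playlist-end rely on). A rough standalone illustration of the pattern with a toy page function; the getslice call and its end-exclusive semantics are assumptions based on youtube-dl's utils module:

    from youtube_dl.utils import OnDemandPagedList

    PAGE_SIZE = 3

    def fetch_page(page):
        # Toy stand-in for AsianCrushPlaylistIE._fetch_page: pretend the
        # backend has exactly three pages of PAGE_SIZE ids each.
        if page > 2:
            return
        for i in range(PAGE_SIZE):
            yield 'video-%d' % (page * PAGE_SIZE + i)

    entries = OnDemandPagedList(fetch_page, PAGE_SIZE)
    # Only the pages covering the requested slice get fetched.
    print(entries.getslice(2, 5))  # expected: ['video-2', 'video-3', 'video-4']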
@@ -14,7 +14,7 @@ from ..utils import (
 
 
 class AudiomackIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
+    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:song/|(?=.+/song/))(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack'
     _TESTS = [
         # hosted on audiomack
@@ -29,25 +29,27 @@ class AudiomackIE(InfoExtractor):
             }
         },
         # audiomack wrapper around soundcloud song
+        # Needs new test URL.
         {
             'add_ie': ['Soundcloud'],
             'url': 'http://www.audiomack.com/song/hip-hop-daily/black-mamba-freestyle',
-            'info_dict': {
-                'id': '258901379',
-                'ext': 'mp3',
-                'description': 'mamba day freestyle for the legend Kobe Bryant ',
-                'title': 'Black Mamba Freestyle [Prod. By Danny Wolf]',
-                'uploader': 'ILOVEMAKONNEN',
-                'upload_date': '20160414',
-            }
+            'only_matching': True,
+            # 'info_dict': {
+            #     'id': '258901379',
+            #     'ext': 'mp3',
+            #     'description': 'mamba day freestyle for the legend Kobe Bryant ',
+            #     'title': 'Black Mamba Freestyle [Prod. By Danny Wolf]',
+            #     'uploader': 'ILOVEMAKONNEN',
+            #     'upload_date': '20160414',
+            # }
         },
     ]
 
     def _real_extract(self, url):
-        # URLs end with [uploader name]/[uploader title]
+        # URLs end with [uploader name]/song/[uploader title]
         # this title is whatever the user types in, and is rarely
         # the proper song title. Real metadata is in the api response
-        album_url_tag = self._match_id(url)
+        album_url_tag = self._match_id(url).replace('/song/', '/')
 
         # Request the extended version of the api for extra fields like artist and title
         api_response = self._download_json(
@@ -73,13 +75,13 @@ class AudiomackIE(InfoExtractor):
 
 
 class AudiomackAlbumIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/album/(?P<id>[\w/-]+)'
+    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:album/|(?=.+/album/))(?P<id>[\w/-]+)'
    IE_NAME = 'audiomack:album'
     _TESTS = [
         # Standard album playlist
         {
             'url': 'http://www.audiomack.com/album/flytunezcom/tha-tour-part-2-mixtape',
-            'playlist_count': 15,
+            'playlist_count': 11,
             'info_dict':
             {
                 'id': '812251',
@@ -95,24 +97,24 @@ class AudiomackAlbumIE(InfoExtractor):
             },
             'playlist': [{
                 'info_dict': {
-                    'title': 'PPP (Pistol P Project) - 9. Heaven or Hell (CHIMACA) ft Zuse (prod by DJ FU)',
-                    'id': '837577',
+                    'title': 'PPP (Pistol P Project) - 10. 4 Minutes Of Hell Part 4 (prod by DY OF 808 MAFIA)',
+                    'id': '837580',
                     'ext': 'mp3',
                     'uploader': 'Lil Herb a.k.a. G Herbo',
                 }
             }],
             'params': {
-                'playliststart': 9,
-                'playlistend': 9,
+                'playliststart': 2,
+                'playlistend': 2,
             }
         }
     ]
 
     def _real_extract(self, url):
-        # URLs end with [uploader name]/[uploader title]
+        # URLs end with [uploader name]/album/[uploader title]
         # this title is whatever the user types in, and is rarely
         # the proper song title. Real metadata is in the api response
-        album_url_tag = self._match_id(url)
+        album_url_tag = self._match_id(url).replace('/album/', '/')
         result = {'_type': 'playlist', 'entries': []}
         # There is no one endpoint for album metadata - instead it is included/repeated in each song's metadata
         # Therefore we don't know how many songs the album has and must infi-loop until failure
@@ -134,7 +136,7 @@ class AudiomackAlbumIE(InfoExtractor):
             # Pull out the album metadata and add to result (if it exists)
             for resultkey, apikey in [('id', 'album_id'), ('title', 'album_title')]:
                 if apikey in api_response and resultkey not in result:
-                    result[resultkey] = api_response[apikey]
+                    result[resultkey] = compat_str(api_response[apikey])
             song_id = url_basename(api_response['url']).rpartition('.')[0]
             result['entries'].append({
                 'id': compat_str(api_response.get('id', song_id)),
@@ -48,6 +48,7 @@ class AWAANBaseIE(InfoExtractor):
             'duration': int_or_none(video_data.get('duration')),
             'timestamp': parse_iso8601(video_data.get('create_time'), ' '),
             'is_live': is_live,
+            'uploader_id': video_data.get('user_id'),
         }
 
 
@@ -107,6 +108,7 @@ class AWAANLiveIE(AWAANBaseIE):
             'title': 're:Dubai Al Oula [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
             'upload_date': '20150107',
             'timestamp': 1420588800,
+            'uploader_id': '71',
         },
         'params': {
             # m3u8 download
|||||||
@@ -47,7 +47,7 @@ class AZMedienIE(InfoExtractor):
|
|||||||
'url': 'https://www.telebaern.tv/telebaern-news/montag-1-oktober-2018-ganze-sendung-133531189#video=0_7xjo9lf1',
|
'url': 'https://www.telebaern.tv/telebaern-news/montag-1-oktober-2018-ganze-sendung-133531189#video=0_7xjo9lf1',
|
||||||
'only_matching': True
|
'only_matching': True
|
||||||
}]
|
}]
|
||||||
_API_TEMPL = 'https://www.%s/api/pub/gql/%s/NewsArticleTeaser/cb9f2f81ed22e9b47f4ca64ea3cc5a5d13e88d1d'
|
_API_TEMPL = 'https://www.%s/api/pub/gql/%s/NewsArticleTeaser/a4016f65fe62b81dc6664dd9f4910e4ab40383be'
|
||||||
_PARTNER_ID = '1719221'
|
_PARTNER_ID = '1719221'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|||||||
youtube_dl/extractor/bandaichannel.py (new file, 37 lines)
@@ -0,0 +1,37 @@
# coding: utf-8
from __future__ import unicode_literals

from .brightcove import BrightcoveNewIE
from ..utils import extract_attributes


class BandaiChannelIE(BrightcoveNewIE):
    IE_NAME = 'bandaichannel'
    _VALID_URL = r'https?://(?:www\.)?b-ch\.com/titles/(?P<id>\d+/\d+)'
    _TESTS = [{
        'url': 'https://www.b-ch.com/titles/514/001',
        'md5': 'a0f2d787baa5729bed71108257f613a4',
        'info_dict': {
            'id': '6128044564001',
            'ext': 'mp4',
            'title': 'メタルファイターMIKU 第1話',
            'timestamp': 1580354056,
            'uploader_id': '5797077852001',
            'upload_date': '20200130',
            'duration': 1387.733,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        attrs = extract_attributes(self._search_regex(
            r'(<video-js[^>]+\bid="bcplayer"[^>]*>)', webpage, 'player'))
        bc = self._download_json(
            'https://pbifcd.b-ch.com/v1/playbackinfo/ST/70/' + attrs['data-info'],
            video_id, headers={'X-API-KEY': attrs['data-auth'].strip()})['bc']
        return self._parse_brightcove_metadata(bc, bc['id'])
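Because BandaiChannelIE subclasses BrightcoveNewIE, the playback-info JSON is handed straight to the inherited _parse_brightcove_metadata. A quick, hedged way to confirm which extractor a b-ch.com URL is routed to (a sketch that assumes this extractor is registered in the installed build):

    from youtube_dl.extractor import gen_extractors

    url = 'https://www.b-ch.com/titles/514/001'
    for ie in gen_extractors():
        if ie.suitable(url) and ie.IE_NAME != 'generic':
            print(ie.IE_NAME)  # expected: bandaichannel
            break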
@@ -49,6 +49,7 @@ class BandcampIE(InfoExtractor):
             'uploader': 'Ben Prunty',
             'timestamp': 1396508491,
             'upload_date': '20140403',
+            'release_timestamp': 1396483200,
             'release_date': '20140403',
             'duration': 260.877,
             'track': 'Lanius (Battle)',
@@ -69,6 +70,7 @@ class BandcampIE(InfoExtractor):
             'uploader': 'Mastodon',
             'timestamp': 1322005399,
             'upload_date': '20111122',
+            'release_timestamp': 1076112000,
             'release_date': '20040207',
             'duration': 120.79,
             'track': 'Hail to Fire',
@@ -197,7 +199,7 @@ class BandcampIE(InfoExtractor):
             'thumbnail': thumbnail,
             'uploader': artist,
             'timestamp': timestamp,
-            'release_date': unified_strdate(tralbum.get('album_release_date')),
+            'release_timestamp': unified_timestamp(tralbum.get('album_release_date')),
             'duration': duration,
             'track': track,
             'track_number': track_number,
@@ -1,37 +1,46 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import functools
 import itertools
+import json
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_etree_Element,
+    compat_HTTPError,
+    compat_parse_qs,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse_urlparse,
+    compat_urlparse,
+)
 from ..utils import (
+    ExtractorError,
+    OnDemandPagedList,
     clean_html,
     dict_get,
-    ExtractorError,
     float_or_none,
     get_element_by_class,
     int_or_none,
     js_to_json,
     parse_duration,
     parse_iso8601,
+    strip_or_none,
     try_get,
     unescapeHTML,
+    unified_timestamp,
     url_or_none,
     urlencode_postdata,
     urljoin,
 )
-from ..compat import (
-    compat_etree_Element,
-    compat_HTTPError,
-    compat_urlparse,
-)
 
 
 class BBCCoUkIE(InfoExtractor):
     IE_NAME = 'bbc.co.uk'
     IE_DESC = 'BBC iPlayer'
-    _ID_REGEX = r'(?:[pbm][\da-z]{7}|w[\da-z]{7,14})'
+    _ID_REGEX = r'(?:[pbml][\da-z]{7}|w[\da-z]{7,14})'
     _VALID_URL = r'''(?x)
                     https?://
                         (?:www\.)?bbc\.co\.uk/
@@ -49,22 +58,17 @@ class BBCCoUkIE(InfoExtractor):
|
|||||||
_LOGIN_URL = 'https://account.bbc.com/signin'
|
_LOGIN_URL = 'https://account.bbc.com/signin'
|
||||||
_NETRC_MACHINE = 'bbc'
|
_NETRC_MACHINE = 'bbc'
|
||||||
|
|
||||||
_MEDIASELECTOR_URLS = [
|
_MEDIA_SELECTOR_URL_TEMPL = 'https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/%s/vpid/%s'
|
||||||
|
_MEDIA_SETS = [
|
||||||
# Provides HQ HLS streams with even better quality that pc mediaset but fails
|
# Provides HQ HLS streams with even better quality that pc mediaset but fails
|
||||||
# with geolocation in some cases when it's even not geo restricted at all (e.g.
|
# with geolocation in some cases when it's even not geo restricted at all (e.g.
|
||||||
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
|
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
|
||||||
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
|
'iptv-all',
|
||||||
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
|
'pc',
|
||||||
]
|
]
|
||||||
|
|
||||||
_MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
|
|
||||||
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
|
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
|
||||||
|
|
||||||
_NAMESPACES = (
|
|
||||||
_MEDIASELECTION_NS,
|
|
||||||
_EMP_PLAYLIST_NS,
|
|
||||||
)
|
|
||||||
|
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
{
|
{
|
||||||
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
|
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
|
||||||
@@ -261,8 +265,6 @@ class BBCCoUkIE(InfoExtractor):
|
|||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_USP_RE = r'/([^/]+?)\.ism(?:\.hlsv2\.ism)?/[^/]+\.m3u8'
|
|
||||||
|
|
||||||
def _login(self):
|
def _login(self):
|
||||||
username, password = self._get_login_info()
|
username, password = self._get_login_info()
|
||||||
if username is None:
|
if username is None:
|
||||||
@@ -307,22 +309,14 @@ class BBCCoUkIE(InfoExtractor):
     def _extract_items(self, playlist):
         return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
 
-    def _findall_ns(self, element, xpath):
-        elements = []
-        for ns in self._NAMESPACES:
-            elements.extend(element.findall(xpath % ns))
-        return elements
-
     def _extract_medias(self, media_selection):
-        error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
-        if error is None:
-            media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
-        if error is not None:
-            raise BBCCoUkIE.MediaSelectionError(error.get('id'))
-        return self._findall_ns(media_selection, './{%s}media')
+        error = media_selection.get('result')
+        if error:
+            raise BBCCoUkIE.MediaSelectionError(error)
+        return media_selection.get('media') or []
 
     def _extract_connections(self, media):
-        return self._findall_ns(media, './{%s}connection')
+        return media.get('connection') or []
 
     def _get_subtitles(self, media, programme_id):
         subtitles = {}
@@ -334,13 +328,13 @@ class BBCCoUkIE(InfoExtractor):
                 cc_url, programme_id, 'Downloading captions', fatal=False)
             if not isinstance(captions, compat_etree_Element):
                 continue
-            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
-            subtitles[lang] = [
+            subtitles['en'] = [
                 {
                     'url': connection.get('href'),
                     'ext': 'ttml',
                 },
             ]
+            break
         return subtitles
 
     def _raise_extractor_error(self, media_selection_error):
@@ -350,10 +344,10 @@ class BBCCoUkIE(InfoExtractor):
 
     def _download_media_selector(self, programme_id):
         last_exception = None
-        for mediaselector_url in self._MEDIASELECTOR_URLS:
+        for media_set in self._MEDIA_SETS:
             try:
                 return self._download_media_selector_url(
-                    mediaselector_url % programme_id, programme_id)
+                    self._MEDIA_SELECTOR_URL_TEMPL % (media_set, programme_id), programme_id)
             except BBCCoUkIE.MediaSelectionError as e:
                 if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
                     last_exception = e
@@ -362,8 +356,8 @@ class BBCCoUkIE(InfoExtractor):
         self._raise_extractor_error(last_exception)
 
     def _download_media_selector_url(self, url, programme_id=None):
-        media_selection = self._download_xml(
-            url, programme_id, 'Downloading media selection XML',
+        media_selection = self._download_json(
+            url, programme_id, 'Downloading media selection JSON',
             expected_status=(403, 404))
         return self._process_media_selector(media_selection, programme_id)
 
@@ -377,7 +371,6 @@ class BBCCoUkIE(InfoExtractor):
             if kind in ('video', 'audio'):
                 bitrate = int_or_none(media.get('bitrate'))
                 encoding = media.get('encoding')
-                service = media.get('service')
                 width = int_or_none(media.get('width'))
                 height = int_or_none(media.get('height'))
                 file_size = int_or_none(media.get('media_file_size'))
@@ -392,8 +385,6 @@ class BBCCoUkIE(InfoExtractor):
                     supplier = connection.get('supplier')
                     transfer_format = connection.get('transferFormat')
                     format_id = supplier or conn_kind or protocol
-                    if service:
-                        format_id = '%s_%s' % (service, format_id)
                     # ASX playlist
                     if supplier == 'asx':
                         for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
@@ -405,23 +396,22 @@ class BBCCoUkIE(InfoExtractor):
                        formats.extend(self._extract_mpd_formats(
                            href, programme_id, mpd_id=format_id, fatal=False))
                    elif transfer_format == 'hls':
-                        formats.extend(self._extract_m3u8_formats(
-                            href, programme_id, ext='mp4', entry_protocol='m3u8_native',
-                            m3u8_id=format_id, fatal=False))
-                        if re.search(self._USP_RE, href):
-                            usp_formats = self._extract_m3u8_formats(
-                                re.sub(self._USP_RE, r'/\1.ism/\1.m3u8', href),
-                                programme_id, ext='mp4', entry_protocol='m3u8_native',
-                                m3u8_id=format_id, fatal=False)
-                            for f in usp_formats:
-                                if f.get('height') and f['height'] > 720:
-                                    continue
-                                formats.append(f)
+                        # TODO: let expected_status be passed into _extract_xxx_formats() instead
+                        try:
+                            fmts = self._extract_m3u8_formats(
+                                href, programme_id, ext='mp4', entry_protocol='m3u8_native',
+                                m3u8_id=format_id, fatal=False)
+                        except ExtractorError as e:
+                            if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
+                                    and e.exc_info[1].code in (403, 404)):
+                                raise
+                            fmts = []
+                        formats.extend(fmts)
                    elif transfer_format == 'hds':
                        formats.extend(self._extract_f4m_formats(
                            href, programme_id, f4m_id=format_id, fatal=False))
                    else:
-                        if not service and not supplier and bitrate:
+                        if not supplier and bitrate:
                            format_id += '-%d' % bitrate
                        fmt = {
                            'format_id': format_id,
@@ -554,7 +544,7 @@ class BBCCoUkIE(InfoExtractor):
         webpage = self._download_webpage(url, group_id, 'Downloading video page')
 
         error = self._search_regex(
-            r'<div\b[^>]+\bclass=["\']smp__message delta["\'][^>]*>([^<]+)<',
+            r'<div\b[^>]+\bclass=["\'](?:smp|playout)__message delta["\'][^>]*>\s*([^<]+?)\s*<',
             webpage, 'error', default=None)
         if error:
             raise ExtractorError(error, expected=True)
@@ -607,16 +597,9 @@ class BBCIE(BBCCoUkIE):
     IE_DESC = 'BBC'
     _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
 
-    _MEDIASELECTOR_URLS = [
-        # Provides HQ HLS streams but fails with geolocation in some cases when it's
-        # even not geo restricted at all
-        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
-        # Provides more formats, namely direct mp4 links, but fails on some videos with
-        # notukerror for non UK (?) users (e.g.
-        # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
-        'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
-        # Provides fewer formats, but works everywhere for everybody (hopefully)
-        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
+    _MEDIA_SETS = [
+        'mobile-tablet-main',
+        'pc',
     ]
 
     _TESTS = [{
@@ -790,23 +773,44 @@ class BBCIE(BBCCoUkIE):
         'only_matching': True,
     }, {
         # custom redirection to www.bbc.com
+        # also, video with window.__INITIAL_DATA__
         'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
-        'only_matching': True,
+        'info_dict': {
+            'id': 'p02xzws1',
+            'ext': 'mp4',
+            'title': "Pluto may have 'nitrogen glaciers'",
+            'description': 'md5:6a95b593f528d7a5f2605221bc56912f',
+            'thumbnail': r're:https?://.+/.+\.jpg',
+            'timestamp': 1437785037,
+            'upload_date': '20150725',
+        },
+    }, {
+        # video with window.__INITIAL_DATA__ and value as JSON string
+        'url': 'https://www.bbc.com/news/av/world-europe-59468682',
+        'info_dict': {
+            'id': 'p0b71qth',
+            'ext': 'mp4',
+            'title': 'Why France is making this woman a national hero',
+            'description': 'md5:7affdfab80e9c3a1f976230a1ff4d5e4',
+            'thumbnail': r're:https?://.+/.+\.jpg',
+            'timestamp': 1638230731,
+            'upload_date': '20211130',
+        },
     }, {
         # single video article embedded with data-media-vpid
         'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
         'only_matching': True,
     }, {
+        # bbcthreeConfig
         'url': 'https://www.bbc.co.uk/bbcthree/clip/73d0bbd0-abc3-4cea-b3c0-cdae21905eb1',
         'info_dict': {
             'id': 'p06556y7',
             'ext': 'mp4',
-            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
-            'description': 'md5:4b7dfd063d5a789a1512e99662be3ddd',
+            'title': 'Things Not To Say to people that live on council estates',
+            'description': "From being labelled a 'chav', to the presumption that they're 'scroungers', people who live on council estates encounter all kinds of prejudices and false assumptions about themselves, their families, and their lifestyles. Here, eight people discuss the common statements, misconceptions, and clichés that they're tired of hearing.",
+            'duration': 360,
+            'thumbnail': r're:https?://.+/.+\.jpg',
         },
-        'params': {
-            'skip_download': True,
-        }
     }, {
         # window.__PRELOADED_STATE__
         'url': 'https://www.bbc.co.uk/radio/play/b0b9z4yl',
@@ -827,11 +831,25 @@ class BBCIE(BBCCoUkIE):
             'description': 'Learn English words and phrases from this story',
         },
         'add_ie': [BBCCoUkIE.ie_key()],
+    }, {
+        # BBC Reel
+        'url': 'https://www.bbc.com/reel/video/p07c6sb6/how-positive-thinking-is-harming-your-happiness',
+        'info_dict': {
+            'id': 'p07c6sb9',
+            'ext': 'mp4',
+            'title': 'How positive thinking is harming your happiness',
+            'alt_title': 'The downsides of positive thinking',
+            'description': 'md5:fad74b31da60d83b8265954ee42d85b4',
+            'duration': 235,
+            'thumbnail': r're:https?://.+/p07c9dsr.jpg',
+            'upload_date': '20190604',
+            'categories': ['Psychology'],
+        },
     }]
 
     @classmethod
     def suitable(cls, url):
-        EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerPlaylistIE, BBCCoUkPlaylistIE)
+        EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerEpisodesIE, BBCCoUkIPlayerGroupIE, BBCCoUkPlaylistIE)
         return (False if any(ie.suitable(url) for ie in EXCLUDE_IE)
                 else super(BBCIE, cls).suitable(url))
 
@@ -963,7 +981,7 @@ class BBCIE(BBCCoUkIE):
                 else:
                     entry['title'] = info['title']
                     entry['formats'].extend(info['formats'])
-            except Exception as e:
+            except ExtractorError as e:
                 # Some playlist URL may fail with 500, at the same time
                 # the other one may work fine (e.g.
                 # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
@@ -1014,6 +1032,37 @@ class BBCIE(BBCCoUkIE):
                 'subtitles': subtitles,
             }
 
+        # bbc reel (e.g. https://www.bbc.com/reel/video/p07c6sb6/how-positive-thinking-is-harming-your-happiness)
+        initial_data = self._parse_json(self._html_search_regex(
+            r'<script[^>]+id=(["\'])initial-data\1[^>]+data-json=(["\'])(?P<json>(?:(?!\2).)+)',
+            webpage, 'initial data', default='{}', group='json'), playlist_id, fatal=False)
+        if initial_data:
+            init_data = try_get(
+                initial_data, lambda x: x['initData']['items'][0], dict) or {}
+            smp_data = init_data.get('smpData') or {}
+            clip_data = try_get(smp_data, lambda x: x['items'][0], dict) or {}
+            version_id = clip_data.get('versionID')
+            if version_id:
+                title = smp_data['title']
+                formats, subtitles = self._download_media_selector(version_id)
+                self._sort_formats(formats)
+                image_url = smp_data.get('holdingImageURL')
+                display_date = init_data.get('displayDate')
+                topic_title = init_data.get('topicTitle')
+
+                return {
+                    'id': version_id,
+                    'title': title,
+                    'formats': formats,
+                    'alt_title': init_data.get('shortTitle'),
+                    'thumbnail': image_url.replace('$recipe', 'raw') if image_url else None,
+                    'description': smp_data.get('summary') or init_data.get('shortSummary'),
+                    'upload_date': display_date.replace('-', '') if display_date else None,
+                    'subtitles': subtitles,
+                    'duration': int_or_none(clip_data.get('duration')),
+                    'categories': [topic_title] if topic_title else None,
+                }
+
         # Morph based embed (e.g. http://www.bbc.co.uk/sport/live/olympics/36895975)
         # There are several setPayload calls may be present but the video
         # seems to be always related to the first one
@@ -1075,7 +1124,7 @@ class BBCIE(BBCCoUkIE):
             thumbnail = None
             image_url = current_programme.get('image_url')
             if image_url:
-                thumbnail = image_url.replace('{recipe}', '1920x1920')
+                thumbnail = image_url.replace('{recipe}', 'raw')
             return {
                 'id': programme_id,
                 'title': title,
@@ -1134,9 +1183,16 @@ class BBCIE(BBCCoUkIE):
             return self.playlist_result(
                 entries, playlist_id, playlist_title, playlist_description)
 
-        initial_data = self._parse_json(self._search_regex(
-            r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage,
-            'preload state', default='{}'), playlist_id, fatal=False)
+        initial_data = self._search_regex(
+            r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage,
+            'quoted preload state', default=None)
+        if initial_data is None:
+            initial_data = self._search_regex(
+                r'window\.__INITIAL_DATA__\s*=\s*({.+?})\s*;', webpage,
+                'preload state', default={})
+        else:
+            initial_data = self._parse_json(initial_data or '"{}"', playlist_id, fatal=False)
+        initial_data = self._parse_json(initial_data, playlist_id, fatal=False)
         if initial_data:
             def parse_media(media):
                 if not media:
@@ -1148,19 +1204,39 @@ class BBCIE(BBCCoUkIE):
                         continue
                     formats, subtitles = self._download_media_selector(item_id)
                     self._sort_formats(formats)
+                    item_desc = None
+                    blocks = try_get(media, lambda x: x['summary']['blocks'], list)
+                    if blocks:
+                        summary = []
+                        for block in blocks:
+                            text = try_get(block, lambda x: x['model']['text'], compat_str)
+                            if text:
+                                summary.append(text)
+                        if summary:
+                            item_desc = '\n\n'.join(summary)
+                    item_time = None
+                    for meta in try_get(media, lambda x: x['metadata']['items'], list) or []:
+                        if try_get(meta, lambda x: x['label']) == 'Published':
+                            item_time = unified_timestamp(meta.get('timestamp'))
+                            break
                     entries.append({
                         'id': item_id,
                         'title': item_title,
                         'thumbnail': item.get('holdingImageUrl'),
                         'formats': formats,
                         'subtitles': subtitles,
+                        'timestamp': item_time,
+                        'description': strip_or_none(item_desc),
                     })
             for resp in (initial_data.get('data') or {}).values():
                 name = resp.get('name')
                 if name == 'media-experience':
                     parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict))
                 elif name == 'article':
-                    for block in (try_get(resp, lambda x: x['data']['blocks'], list) or []):
+                    for block in (try_get(resp,
+                                          (lambda x: x['data']['blocks'],
+                                           lambda x: x['data']['content']['model']['blocks'],),
+                                          list) or []):
                         if block.get('type') != 'media':
                             continue
                         parse_media(block.get('model'))
@@ -1327,21 +1403,149 @@ class BBCCoUkPlaylistBaseIE(InfoExtractor):
             playlist_id, title, description)
 
 
-class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
-    IE_NAME = 'bbc.co.uk:iplayer:playlist'
-    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/(?:episodes|group)/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
-    _URL_TEMPLATE = 'http://www.bbc.co.uk/iplayer/episode/%s'
-    _VIDEO_ID_TEMPLATE = r'data-ip-id=["\'](%s)'
+class BBCCoUkIPlayerPlaylistBaseIE(InfoExtractor):
+    _VALID_URL_TMPL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/%%s/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
+
+    @staticmethod
+    def _get_default(episode, key, default_key='default'):
+        return try_get(episode, lambda x: x[key][default_key])
+
+    def _get_description(self, data):
+        synopsis = data.get(self._DESCRIPTION_KEY) or {}
+        return dict_get(synopsis, ('large', 'medium', 'small'))
+
+    def _fetch_page(self, programme_id, per_page, series_id, page):
+        elements = self._get_elements(self._call_api(
+            programme_id, per_page, page + 1, series_id))
+        for element in elements:
+            episode = self._get_episode(element)
+            episode_id = episode.get('id')
+            if not episode_id:
+                continue
+            thumbnail = None
+            image = self._get_episode_image(episode)
+            if image:
+                thumbnail = image.replace('{recipe}', 'raw')
+            category = self._get_default(episode, 'labels', 'category')
+            yield {
+                '_type': 'url',
+                'id': episode_id,
+                'title': self._get_episode_field(episode, 'subtitle'),
+                'url': 'https://www.bbc.co.uk/iplayer/episode/' + episode_id,
+                'thumbnail': thumbnail,
+                'description': self._get_description(episode),
+                'categories': [category] if category else None,
+                'series': self._get_episode_field(episode, 'title'),
+                'ie_key': BBCCoUkIE.ie_key(),
+            }
+
+    def _real_extract(self, url):
+        pid = self._match_id(url)
+        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        series_id = qs.get('seriesId', [None])[0]
+        page = qs.get('page', [None])[0]
+        per_page = 36 if page else self._PAGE_SIZE
+        fetch_page = functools.partial(self._fetch_page, pid, per_page, series_id)
+        entries = fetch_page(int(page) - 1) if page else OnDemandPagedList(fetch_page, self._PAGE_SIZE)
+        playlist_data = self._get_playlist_data(self._call_api(pid, 1))
+        return self.playlist_result(
+            entries, pid, self._get_playlist_title(playlist_data),
+            self._get_description(playlist_data))
+
+
+class BBCCoUkIPlayerEpisodesIE(BBCCoUkIPlayerPlaylistBaseIE):
+    IE_NAME = 'bbc.co.uk:iplayer:episodes'
+    _VALID_URL = BBCCoUkIPlayerPlaylistBaseIE._VALID_URL_TMPL % 'episodes'
     _TESTS = [{
         'url': 'http://www.bbc.co.uk/iplayer/episodes/b05rcz9v',
         'info_dict': {
             'id': 'b05rcz9v',
             'title': 'The Disappearance',
-            'description': 'French thriller serial about a missing teenager.',
+            'description': 'md5:58eb101aee3116bad4da05f91179c0cb',
         },
-        'playlist_mincount': 6,
-        'skip': 'This programme is not currently available on BBC iPlayer',
+        'playlist_mincount': 8,
     }, {
+        # all seasons
+        'url': 'https://www.bbc.co.uk/iplayer/episodes/b094m5t9/doctor-foster',
+        'info_dict': {
+            'id': 'b094m5t9',
+            'title': 'Doctor Foster',
+            'description': 'md5:5aa9195fad900e8e14b52acd765a9fd6',
+        },
+        'playlist_mincount': 10,
+    }, {
+        # explicit season
+        'url': 'https://www.bbc.co.uk/iplayer/episodes/b094m5t9/doctor-foster?seriesId=b094m6nv',
+        'info_dict': {
+            'id': 'b094m5t9',
+            'title': 'Doctor Foster',
+            'description': 'md5:5aa9195fad900e8e14b52acd765a9fd6',
+        },
+        'playlist_mincount': 5,
+    }, {
+        # all pages
+        'url': 'https://www.bbc.co.uk/iplayer/episodes/m0004c4v/beechgrove',
+        'info_dict': {
+            'id': 'm0004c4v',
+            'title': 'Beechgrove',
+            'description': 'Gardening show that celebrates Scottish horticulture and growing conditions.',
+        },
+        'playlist_mincount': 37,
+    }, {
+        # explicit page
+        'url': 'https://www.bbc.co.uk/iplayer/episodes/m0004c4v/beechgrove?page=2',
+        'info_dict': {
+            'id': 'm0004c4v',
+            'title': 'Beechgrove',
+            'description': 'Gardening show that celebrates Scottish horticulture and growing conditions.',
+        },
+        'playlist_mincount': 1,
+    }]
+    _PAGE_SIZE = 100
+    _DESCRIPTION_KEY = 'synopsis'
+
+    def _get_episode_image(self, episode):
+        return self._get_default(episode, 'image')
+
+    def _get_episode_field(self, episode, field):
+        return self._get_default(episode, field)
+
+    @staticmethod
+    def _get_elements(data):
+        return data['entities']['results']
+
+    @staticmethod
+    def _get_episode(element):
+        return element.get('episode') or {}
+
+    def _call_api(self, pid, per_page, page=1, series_id=None):
+        variables = {
+            'id': pid,
+            'page': page,
+            'perPage': per_page,
+        }
+        if series_id:
+            variables['sliceId'] = series_id
+        return self._download_json(
+            'https://graph.ibl.api.bbc.co.uk/', pid, headers={
+                'Content-Type': 'application/json'
+            }, data=json.dumps({
+                'id': '5692d93d5aac8d796a0305e895e61551',
+                'variables': variables,
+            }).encode('utf-8'))['data']['programme']
+
+    @staticmethod
+    def _get_playlist_data(data):
+        return data
+
+    def _get_playlist_title(self, data):
+        return self._get_default(data, 'title')
+
+
+class BBCCoUkIPlayerGroupIE(BBCCoUkIPlayerPlaylistBaseIE):
+    IE_NAME = 'bbc.co.uk:iplayer:group'
+    _VALID_URL = BBCCoUkIPlayerPlaylistBaseIE._VALID_URL_TMPL % 'group'
+    _TESTS = [{
         # Available for over a year unlike 30 days for most other programmes
         'url': 'http://www.bbc.co.uk/iplayer/group/p02tcc32',
         'info_dict': {
@@ -1350,14 +1554,56 @@ class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
             'description': 'md5:683e901041b2fe9ba596f2ab04c4dbe7',
         },
         'playlist_mincount': 10,
+    }, {
+        # all pages
+        'url': 'https://www.bbc.co.uk/iplayer/group/p081d7j7',
+        'info_dict': {
+            'id': 'p081d7j7',
+            'title': 'Music in Scotland',
+            'description': 'Perfomances in Scotland and programmes featuring Scottish acts.',
+        },
+        'playlist_mincount': 47,
+    }, {
+        # explicit page
+        'url': 'https://www.bbc.co.uk/iplayer/group/p081d7j7?page=2',
+        'info_dict': {
+            'id': 'p081d7j7',
+            'title': 'Music in Scotland',
+            'description': 'Perfomances in Scotland and programmes featuring Scottish acts.',
+        },
+        'playlist_mincount': 11,
     }]
+    _PAGE_SIZE = 200
+    _DESCRIPTION_KEY = 'synopses'
 
-    def _extract_title_and_description(self, webpage):
-        title = self._search_regex(r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
-        description = self._search_regex(
-            r'<p[^>]+class=(["\'])subtitle\1[^>]*>(?P<value>[^<]+)</p>',
-            webpage, 'description', fatal=False, group='value')
-        return title, description
+    def _get_episode_image(self, episode):
+        return self._get_default(episode, 'images', 'standard')
+
+    def _get_episode_field(self, episode, field):
+        return episode.get(field)
+
+    @staticmethod
+    def _get_elements(data):
+        return data['elements']
+
+    @staticmethod
+    def _get_episode(element):
+        return element
+
+    def _call_api(self, pid, per_page, page=1, series_id=None):
+        return self._download_json(
+            'http://ibl.api.bbc.co.uk/ibl/v1/groups/%s/episodes' % pid,
+            pid, query={
+                'page': page,
+                'per_page': per_page,
+            })['group_episodes']
+
+    @staticmethod
+    def _get_playlist_data(data):
+        return data['group']
+
+    def _get_playlist_title(self, data):
+        return data.get('title')
 
 
 class BBCCoUkPlaylistIE(BBCCoUkPlaylistBaseIE):
|||||||
103
youtube_dl/extractor/bfmtv.py
Normal file
103
youtube_dl/extractor/bfmtv.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import extract_attributes
|
||||||
|
|
||||||
|
|
||||||
|
class BFMTVBaseIE(InfoExtractor):
|
||||||
|
_VALID_URL_BASE = r'https?://(?:www\.)?bfmtv\.com/'
|
||||||
|
_VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html'
|
||||||
|
_VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block"[^>]*>)'
|
||||||
|
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
|
||||||
|
|
||||||
|
def _brightcove_url_result(self, video_id, video_block):
|
||||||
|
account_id = video_block.get('accountid') or '876450612001'
|
||||||
|
player_id = video_block.get('playerid') or 'I2qBTln4u'
|
||||||
|
return self.url_result(
|
||||||
|
self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),
|
||||||
|
'BrightcoveNew', video_id)
|
||||||
|
|
||||||
|
|
||||||
|
class BFMTVIE(BFMTVBaseIE):
|
||||||
|
IE_NAME = 'bfmtv'
|
||||||
|
_VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'V'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.bfmtv.com/politique/emmanuel-macron-l-islam-est-une-religion-qui-vit-une-crise-aujourd-hui-partout-dans-le-monde_VN-202010020146.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '6196747868001',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Emmanuel Macron: "L\'Islam est une religion qui vit une crise aujourd’hui, partout dans le monde"',
|
||||||
|
'description': 'Le Président s\'exprime sur la question du séparatisme depuis les Mureaux, dans les Yvelines.',
|
||||||
|
'uploader_id': '876450610001',
|
||||||
|
'upload_date': '20201002',
|
||||||
|
'timestamp': 1601629620,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
bfmtv_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, bfmtv_id)
|
||||||
|
video_block = extract_attributes(self._search_regex(
|
||||||
|
self._VIDEO_BLOCK_REGEX, webpage, 'video block'))
|
||||||
|
return self._brightcove_url_result(video_block['videoid'], video_block)
|
||||||
|
|
||||||
|
|
||||||
|
class BFMTVLiveIE(BFMTVIE):
|
||||||
|
IE_NAME = 'bfmtv:live'
|
||||||
|
_VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P<id>(?:[^/]+/)?en-direct)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.bfmtv.com/en-direct/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '5615950982001',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': r're:^le direct BFMTV WEB \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
|
||||||
|
'uploader_id': '876450610001',
|
||||||
|
'upload_date': '20171018',
|
||||||
|
'timestamp': 1508329950,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.bfmtv.com/economie/en-direct/',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
|
||||||
|
class BFMTVArticleIE(BFMTVBaseIE):
|
||||||
|
IE_NAME = 'bfmtv:article'
|
||||||
|
_VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'A'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.bfmtv.com/sante/covid-19-un-responsable-de-l-institut-pasteur-se-demande-quand-la-france-va-se-reconfiner_AV-202101060198.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '202101060198',
|
||||||
|
'title': 'Covid-19: un responsable de l\'Institut Pasteur se demande "quand la France va se reconfiner"',
|
||||||
|
'description': 'md5:947974089c303d3ac6196670ae262843',
|
||||||
|
},
|
||||||
|
'playlist_count': 2,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.bfmtv.com/international/pour-bolsonaro-le-bresil-est-en-faillite-mais-il-ne-peut-rien-faire_AD-202101060232.html',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_AN-202101060275.html',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
bfmtv_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, bfmtv_id)
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
for video_block_el in re.findall(self._VIDEO_BLOCK_REGEX, webpage):
|
||||||
|
video_block = extract_attributes(video_block_el)
|
||||||
|
video_id = video_block.get('videoid')
|
||||||
|
if not video_id:
|
||||||
|
continue
|
||||||
|
entries.append(self._brightcove_url_result(video_id, video_block))
|
||||||
|
|
||||||
|
return self.playlist_result(
|
||||||
|
entries, bfmtv_id, self._og_search_title(webpage, fatal=False),
|
||||||
|
self._html_search_meta(['og:description', 'description'], webpage))
|
||||||
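Note (illustrative, not part of the diff): the article extractor above leans on extract_attributes to pull the Brightcove account, player and video ids out of each video_block div before handing off to BrightcoveNewIE. A minimal sketch of that step, using a hypothetical snippet of markup (attribute values made up for illustration, not taken from bfmtv.com):

    from youtube_dl.utils import extract_attributes

    # Hypothetical markup shaped like the <div class="video_block"> elements
    # captured by _VIDEO_BLOCK_REGEX above.
    block = '<div class="video_block" accountid="876450612001" playerid="I2qBTln4u" videoid="6196747868001">'
    attrs = extract_attributes(block)
    brightcove_url = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' % (
        attrs['accountid'], attrs['playerid'], attrs['videoid'])
    print(brightcove_url)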
youtube_dl/extractor/bibeltv.py (new file, 30 lines)
@@ -0,0 +1,30 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class BibelTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bibeltv\.de/mediathek/videos/(?:crn/)?(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.bibeltv.de/mediathek/videos/329703-sprachkurs-in-malaiisch',
        'md5': '252f908192d611de038b8504b08bf97f',
        'info_dict': {
            'id': 'ref:329703',
            'ext': 'mp4',
            'title': 'Sprachkurs in Malaiisch',
            'description': 'md5:3e9f197d29ee164714e67351cf737dfe',
            'timestamp': 1608316701,
            'uploader_id': '5840105145001',
            'upload_date': '20201218',
        }
    }, {
        'url': 'https://www.bibeltv.de/mediathek/videos/crn/326374',
        'only_matching': True,
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5840105145001/default_default/index.html?videoId=ref:%s'

    def _real_extract(self, url):
        crn_id = self._match_id(url)
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % crn_id, 'BrightcoveNew')
youtube_dl/extractor/bigo.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import ExtractorError, urlencode_postdata


class BigoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'https://www.bigo.tv/ja/221338632',
        'info_dict': {
            'id': '6576287577575737440',
            'ext': 'mp4',
            'title': '土よ〜💁♂️ 休憩室/REST room',
            'thumbnail': r're:https?://.+',
            'uploader': '✨Shin💫',
            'uploader_id': '221338632',
            'is_live': True,
        },
        'skip': 'livestream',
    }, {
        'url': 'https://www.bigo.tv/th/Tarlerm1304',
        'only_matching': True,
    }, {
        'url': 'https://bigo.tv/115976881',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

        info_raw = self._download_json(
            'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo',
            user_id, data=urlencode_postdata({'siteId': user_id}))

        if not isinstance(info_raw, dict):
            raise ExtractorError('Received invalid JSON data')
        if info_raw.get('code'):
            raise ExtractorError(
                'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True)
        info = info_raw.get('data') or {}

        if not info.get('alive'):
            raise ExtractorError('This user is offline.', expected=True)

        return {
            'id': info.get('roomId') or user_id,
            'title': info.get('roomTopic') or info.get('nick_name') or user_id,
            'formats': [{
                'url': info.get('hls_src'),
                'ext': 'mp4',
                'protocol': 'm3u8',
            }],
            'thumbnail': info.get('snapshot'),
            'uploader': info.get('nick_name'),
            'uploader_id': user_id,
            'is_live': True,
        }
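Note (illustrative, not part of the diff): Bigo rooms are live-only, so a quick manual check of the new extractor could look like the sketch below rather than a fixed test; the room URL is one of the only_matching test URLs above and must be live at call time, otherwise the 'This user is offline.' error above is raised.

    import youtube_dl

    # Sketch: list the live HLS format instead of downloading it.
    with youtube_dl.YoutubeDL({'listformats': True}) as ydl:
        ydl.extract_info('https://bigo.tv/115976881', download=False)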
youtube_dl/extractor/bilibili.py
@@ -156,6 +156,7 @@ class BiliBiliIE(InfoExtractor):
         cid = js['result']['cid']
 
         headers = {
+            'Accept': 'application/json',
             'Referer': url
         }
         headers.update(self.geo_verification_headers())
@@ -232,7 +233,7 @@ class BiliBiliIE(InfoExtractor):
             webpage)
         if uploader_mobj:
             info.update({
-                'uploader': uploader_mobj.group('name'),
+                'uploader': uploader_mobj.group('name').strip(),
                 'uploader_id': uploader_mobj.group('id'),
             })
         if not info.get('uploader'):
@@ -368,6 +369,11 @@ class BilibiliAudioIE(BilibiliAudioBaseIE):
             'filesize': int_or_none(play_data.get('size')),
         }]
 
+        for a_format in formats:
+            a_format.setdefault('http_headers', {}).update({
+                'Referer': url,
+            })
+
         song = self._call_api('song/info', au_id)
         title = song['title']
         statistic = song.get('statistic') or {}
|||||||
@@ -90,13 +90,19 @@ class BleacherReportCMSIE(AMPIE):
|
|||||||
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})'
|
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms',
|
'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms',
|
||||||
'md5': '2e4b0a997f9228ffa31fada5c53d1ed1',
|
'md5': '670b2d73f48549da032861130488c681',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
|
'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
|
'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
|
||||||
'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
|
'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
|
||||||
|
'upload_date': '20150723',
|
||||||
|
'timestamp': 1437679032,
|
||||||
|
|
||||||
},
|
},
|
||||||
|
'expected_warnings': [
|
||||||
|
'Unable to download f4m manifest'
|
||||||
|
]
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|||||||
youtube_dl/extractor/blinkx.py (deleted, entire file removed)
@@ -1,86 +0,0 @@
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    remove_start,
    int_or_none,
)


class BlinkxIE(InfoExtractor):
    _VALID_URL = r'(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
    IE_NAME = 'blinkx'

    _TEST = {
        'url': 'http://www.blinkx.com/ce/Da0Gw3xc5ucpNduzLuDDlv4WC9PuI4fDi1-t6Y3LyfdY2SZS5Urbvn-UPJvrvbo8LTKTc67Wu2rPKSQDJyZeeORCR8bYkhs8lI7eqddznH2ofh5WEEdjYXnoRtj7ByQwt7atMErmXIeYKPsSDuMAAqJDlQZ-3Ff4HJVeH_s3Gh8oQ',
        'md5': '337cf7a344663ec79bf93a526a2e06c7',
        'info_dict': {
            'id': 'Da0Gw3xc',
            'ext': 'mp4',
            'title': 'No Daily Show for John Oliver; HBO Show Renewed - IGN News',
            'uploader': 'IGN News',
            'upload_date': '20150217',
            'timestamp': 1424215740,
            'description': 'HBO has renewed Last Week Tonight With John Oliver for two more seasons.',
            'duration': 47.743333,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        display_id = video_id[:8]

        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
                   + 'video=%s' % video_id)
        data_json = self._download_webpage(api_url, display_id)
        data = json.loads(data_json)['api']['results'][0]
        duration = None
        thumbnails = []
        formats = []
        for m in data['media']:
            if m['type'] == 'jpg':
                thumbnails.append({
                    'url': m['link'],
                    'width': int(m['w']),
                    'height': int(m['h']),
                })
            elif m['type'] == 'original':
                duration = float(m['d'])
            elif m['type'] == 'youtube':
                yt_id = m['link']
                self.to_screen('Youtube video detected: %s' % yt_id)
                return self.url_result(yt_id, 'Youtube', video_id=yt_id)
            elif m['type'] in ('flv', 'mp4'):
                vcodec = remove_start(m['vcodec'], 'ff')
                acodec = remove_start(m['acodec'], 'ff')
                vbr = int_or_none(m.get('vbr') or m.get('vbitrate'), 1000)
                abr = int_or_none(m.get('abr') or m.get('abitrate'), 1000)
                tbr = vbr + abr if vbr and abr else None
                format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
                formats.append({
                    'format_id': format_id,
                    'url': m['link'],
                    'vcodec': vcodec,
                    'acodec': acodec,
                    'abr': abr,
                    'vbr': vbr,
                    'tbr': tbr,
                    'width': int_or_none(m.get('w')),
                    'height': int_or_none(m.get('h')),
                })

        self._sort_formats(formats)

        return {
            'id': display_id,
            'fullid': video_id,
            'title': data['title'],
            'formats': formats,
            'uploader': data['channel_name'],
            'timestamp': data['pubdate_epoch'],
            'description': data.get('description'),
            'thumbnails': thumbnails,
            'duration': duration,
        }
youtube_dl/extractor/bongacams.py (new file, 60 lines)
@@ -0,0 +1,60 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    try_get,
    urlencode_postdata,
)


class BongaCamsIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.com)/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://de.bongacams.com/azumi-8',
        'only_matching': True,
    }, {
        'url': 'https://cn.bongacams.com/azumi-8',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        channel_id = mobj.group('id')

        amf = self._download_json(
            'https://%s/tools/amf.php' % host, channel_id,
            data=urlencode_postdata((
                ('method', 'getRoomData'),
                ('args[]', channel_id),
                ('args[]', 'false'),
            )), headers={'X-Requested-With': 'XMLHttpRequest'})

        server_url = amf['localData']['videoServerUrl']

        uploader_id = try_get(
            amf, lambda x: x['performerData']['username'], compat_str) or channel_id
        uploader = try_get(
            amf, lambda x: x['performerData']['displayName'], compat_str)
        like_count = int_or_none(try_get(
            amf, lambda x: x['performerData']['loversCount']))

        formats = self._extract_m3u8_formats(
            '%s/hls/stream_%s/playlist.m3u8' % (server_url, uploader_id),
            channel_id, 'mp4', m3u8_id='hls', live=True)
        self._sort_formats(formats)

        return {
            'id': channel_id,
            'title': self._live_title(uploader or uploader_id),
            'uploader': uploader,
            'uploader_id': uploader_id,
            'like_count': like_count,
            'age_limit': 18,
            'is_live': True,
            'formats': formats,
        }
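Note (illustrative, not part of the diff): the amf.php call above passes a tuple of pairs rather than a dict to urlencode_postdata so that the repeated 'args[]' key survives encoding. A small sketch of that behaviour; the channel name is a placeholder, not a real room.

    from youtube_dl.utils import urlencode_postdata

    # 'args[]' appears twice; a dict would collapse the duplicate key, while a
    # tuple of pairs keeps both, which is what the getRoomData endpoint expects.
    # urlencode_postdata returns bytes ready to be used as the data= argument.
    body = urlencode_postdata((
        ('method', 'getRoomData'),
        ('args[]', 'example-channel'),
        ('args[]', 'false'),
    ))
    print(body.decode('ascii'))  # method=getRoomData&args%5B%5D=example-channel&args%5B%5D=false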
youtube_dl/extractor/bravotv.py
@@ -12,7 +12,7 @@ from ..utils import (
 
 
 class BravoTVIE(AdobePassIE):
-    _VALID_URL = r'https?://(?:www\.)?bravotv\.com/(?:[^/]+/)+(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<req_id>bravotv|oxygen)\.com/(?:[^/]+/)+(?P<id>[^/?#]+)'
     _TESTS = [{
         'url': 'https://www.bravotv.com/top-chef/season-16/episode-15/videos/the-top-chef-season-16-winner-is',
         'md5': 'e34684cfea2a96cd2ee1ef3a60909de9',
@@ -28,10 +28,13 @@ class BravoTVIE(AdobePassIE):
     }, {
         'url': 'http://www.bravotv.com/below-deck/season-3/ep-14-reunion-part-1',
         'only_matching': True,
+    }, {
+        'url': 'https://www.oxygen.com/in-ice-cold-blood/season-2/episode-16/videos/handling-the-horwitz-house-after-the-murder-season-2',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        site, display_id = re.match(self._VALID_URL, url).groups()
         webpage = self._download_webpage(url, display_id)
         settings = self._parse_json(self._search_regex(
             r'<script[^>]+data-drupal-selector="drupal-settings-json"[^>]*>({.+?})</script>', webpage, 'drupal settings'),
@@ -53,11 +56,14 @@ class BravoTVIE(AdobePassIE):
             tp_path = release_pid = tve['release_pid']
             if tve.get('entitlement') == 'auth':
                 adobe_pass = settings.get('tve_adobe_auth', {})
+                if site == 'bravotv':
+                    site = 'bravo'
                 resource = self._get_mvpd_resource(
-                    adobe_pass.get('adobePassResourceId', 'bravo'),
+                    adobe_pass.get('adobePassResourceId') or site,
                     tve['title'], release_pid, tve.get('rating'))
                 query['auth'] = self._extract_mvpd_auth(
-                    url, release_pid, adobe_pass.get('adobePassRequestorId', 'bravo'), resource)
+                    url, release_pid,
+                    adobe_pass.get('adobePassRequestorId') or site, resource)
             else:
                 shared_playlist = settings['ls_playlist']
                 account_pid = shared_playlist['account_pid']
@@ -28,6 +28,7 @@ from ..utils import (
|
|||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
smuggle_url,
|
smuggle_url,
|
||||||
str_or_none,
|
str_or_none,
|
||||||
|
try_get,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
unsmuggle_url,
|
unsmuggle_url,
|
||||||
UnsupportedError,
|
UnsupportedError,
|
||||||
@@ -470,13 +471,18 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
|
def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
|
||||||
title = json_data['name'].strip()
|
title = json_data['name'].strip()
|
||||||
|
|
||||||
|
num_drm_sources = 0
|
||||||
formats = []
|
formats = []
|
||||||
for source in json_data.get('sources', []):
|
sources = json_data.get('sources') or []
|
||||||
|
for source in sources:
|
||||||
container = source.get('container')
|
container = source.get('container')
|
||||||
ext = mimetype2ext(source.get('type'))
|
ext = mimetype2ext(source.get('type'))
|
||||||
src = source.get('src')
|
src = source.get('src')
|
||||||
# https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
|
# https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
|
||||||
if ext == 'ism' or container == 'WVM' or source.get('key_systems'):
|
if container == 'WVM' or source.get('key_systems'):
|
||||||
|
num_drm_sources += 1
|
||||||
|
continue
|
||||||
|
elif ext == 'ism':
|
||||||
continue
|
continue
|
||||||
elif ext == 'm3u8' or container == 'M2TS':
|
elif ext == 'm3u8' or container == 'M2TS':
|
||||||
if not src:
|
if not src:
|
||||||
@@ -533,20 +539,15 @@ class BrightcoveNewIE(AdobePassIE):
|
|||||||
'format_id': build_format_id('rtmp'),
|
'format_id': build_format_id('rtmp'),
|
||||||
})
|
})
|
||||||
formats.append(f)
|
formats.append(f)
|
||||||
if not formats:
|
|
||||||
# for sonyliv.com DRM protected videos
|
|
||||||
s3_source_url = json_data.get('custom_fields', {}).get('s3sourceurl')
|
|
||||||
if s3_source_url:
|
|
||||||
formats.append({
|
|
||||||
'url': s3_source_url,
|
|
||||||
'format_id': 'source',
|
|
||||||
})
|
|
||||||
|
|
||||||
errors = json_data.get('errors')
|
if not formats:
|
||||||
if not formats and errors:
|
errors = json_data.get('errors')
|
||||||
error = errors[0]
|
if errors:
|
||||||
raise ExtractorError(
|
error = errors[0]
|
||||||
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
|
raise ExtractorError(
|
||||||
|
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
|
||||||
|
if sources and num_drm_sources == len(sources):
|
||||||
|
raise ExtractorError('This video is DRM protected.', expected=True)
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
@@ -600,24 +601,27 @@ class BrightcoveNewIE(AdobePassIE):
         store_pk = lambda x: self._downloader.cache.store('brightcove', policy_key_id, x)

         def extract_policy_key():
-            webpage = self._download_webpage(
-                'http://players.brightcove.net/%s/%s_%s/index.min.js'
-                % (account_id, player_id, embed), video_id)
-
-            policy_key = None
-
-            catalog = self._search_regex(
-                r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
-            if catalog:
-                catalog = self._parse_json(
-                    js_to_json(catalog), video_id, fatal=False)
-                if catalog:
-                    policy_key = catalog.get('policyKey')
-
+            base_url = 'http://players.brightcove.net/%s/%s_%s/' % (account_id, player_id, embed)
+            config = self._download_json(
+                base_url + 'config.json', video_id, fatal=False) or {}
+            policy_key = try_get(
+                config, lambda x: x['video_cloud']['policy_key'])
             if not policy_key:
-                policy_key = self._search_regex(
-                    r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
-                    webpage, 'policy key', group='pk')
+                webpage = self._download_webpage(
+                    base_url + 'index.min.js', video_id)
+
+                catalog = self._search_regex(
+                    r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
+                if catalog:
+                    catalog = self._parse_json(
+                        js_to_json(catalog), video_id, fatal=False)
+                    if catalog:
+                        policy_key = catalog.get('policyKey')
+
+                if not policy_key:
+                    policy_key = self._search_regex(
+                        r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
+                        webpage, 'policy key', group='pk')

             store_pk(policy_key)
             return policy_key
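Note: the lookup order introduced above (config.json first, then the bundled player JS) can be reproduced outside the extractor with a minimal standalone sketch; the account/player/embed values below are placeholders for illustration, not taken from this diff.

import json
import re
from urllib.request import urlopen

# Hypothetical player coordinates, for illustration only.
account_id, player_id, embed = '1234567890', 'default', 'default'
base_url = 'http://players.brightcove.net/%s/%s_%s/' % (account_id, player_id, embed)

policy_key = None
try:
    # Preferred source: the player's config.json exposes video_cloud.policy_key.
    config = json.load(urlopen(base_url + 'config.json'))
    policy_key = (config.get('video_cloud') or {}).get('policy_key')
except Exception:
    pass

if not policy_key:
    # Fallback: scrape policyKey out of the bundled player JS, as the old code did.
    js = urlopen(base_url + 'index.min.js').read().decode('utf-8', 'replace')
    m = re.search(r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1', js)
    policy_key = m.group('pk') if m else None

print(policy_key)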
|
|||||||
@@ -8,18 +8,20 @@ from .gigya import GigyaBaseIE
 from ..compat import compat_HTTPError
 from ..utils import (
     ExtractorError,
-    strip_or_none,
+    clean_html,
+    extract_attributes,
     float_or_none,
+    get_element_by_class,
     int_or_none,
     merge_dicts,
-    parse_iso8601,
     str_or_none,
+    strip_or_none,
     url_or_none,
 )


 class CanvasIE(InfoExtractor):
-    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza)/assets/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza|dako)/assets/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
         'md5': '68993eda72ef62386a15ea2cf3c93107',
@@ -37,6 +39,7 @@ class CanvasIE(InfoExtractor):
|
|||||||
'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
|
'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
_GEO_BYPASS = False
|
||||||
_HLS_ENTRY_PROTOCOLS_MAP = {
|
_HLS_ENTRY_PROTOCOLS_MAP = {
|
||||||
'HLS': 'm3u8_native',
|
'HLS': 'm3u8_native',
|
||||||
'HLS_AES': 'm3u8',
|
'HLS_AES': 'm3u8',
|
||||||
@@ -47,29 +50,34 @@ class CanvasIE(InfoExtractor):
|
|||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
site_id, video_id = mobj.group('site_id'), mobj.group('id')
|
site_id, video_id = mobj.group('site_id'), mobj.group('id')
|
||||||
|
|
||||||
# Old API endpoint, serves more formats but may fail for some videos
|
data = None
|
||||||
data = self._download_json(
|
if site_id != 'vrtvideo':
|
||||||
'https://mediazone.vrt.be/api/v1/%s/assets/%s'
|
# Old API endpoint, serves more formats but may fail for some videos
|
||||||
% (site_id, video_id), video_id, 'Downloading asset JSON',
|
data = self._download_json(
|
||||||
'Unable to download asset JSON', fatal=False)
|
'https://mediazone.vrt.be/api/v1/%s/assets/%s'
|
||||||
|
% (site_id, video_id), video_id, 'Downloading asset JSON',
|
||||||
|
'Unable to download asset JSON', fatal=False)
|
||||||
|
|
||||||
# New API endpoint
|
# New API endpoint
|
||||||
if not data:
|
if not data:
|
||||||
|
headers = self.geo_verification_headers()
|
||||||
|
headers.update({'Content-Type': 'application/json'})
|
||||||
token = self._download_json(
|
token = self._download_json(
|
||||||
'%s/tokens' % self._REST_API_BASE, video_id,
|
'%s/tokens' % self._REST_API_BASE, video_id,
|
||||||
'Downloading token', data=b'',
|
'Downloading token', data=b'', headers=headers)['vrtPlayerToken']
|
||||||
headers={'Content-Type': 'application/json'})['vrtPlayerToken']
|
|
||||||
data = self._download_json(
|
data = self._download_json(
|
||||||
'%s/videos/%s' % (self._REST_API_BASE, video_id),
|
'%s/videos/%s' % (self._REST_API_BASE, video_id),
|
||||||
video_id, 'Downloading video JSON', fatal=False, query={
|
video_id, 'Downloading video JSON', query={
|
||||||
'vrtPlayerToken': token,
|
'vrtPlayerToken': token,
|
||||||
'client': '%s@PROD' % site_id,
|
'client': '%s@PROD' % site_id,
|
||||||
}, expected_status=400)
|
}, expected_status=400)
|
||||||
message = data.get('message')
|
if not data.get('title'):
|
||||||
if message and not data.get('title'):
|
code = data.get('code')
|
||||||
if data.get('code') == 'AUTHENTICATION_REQUIRED':
|
if code == 'AUTHENTICATION_REQUIRED':
|
||||||
self.raise_login_required(message)
|
self.raise_login_required()
|
||||||
raise ExtractorError(message, expected=True)
|
elif code == 'INVALID_LOCATION':
|
||||||
|
self.raise_geo_restricted(countries=['BE'])
|
||||||
|
raise ExtractorError(data.get('message') or code, expected=True)
|
||||||
|
|
||||||
title = data['title']
|
title = data['title']
|
||||||
description = data.get('description')
|
description = data.get('description')
|
||||||
@@ -205,20 +213,24 @@ class CanvasEenIE(InfoExtractor):
|
|||||||
|
|
||||||
class VrtNUIE(GigyaBaseIE):
|
class VrtNUIE(GigyaBaseIE):
|
||||||
IE_DESC = 'VrtNU.be'
|
IE_DESC = 'VrtNU.be'
|
||||||
_VALID_URL = r'https?://(?:www\.)?vrt\.be/(?P<site_id>vrtnu)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?vrt\.be/vrtnu/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# Available via old API endpoint
|
# Available via old API endpoint
|
||||||
'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1/postbus-x-s1a1/',
|
'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1989/postbus-x-s1989a1/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'pbs-pub-2e2d8c27-df26-45c9-9dc6-90c78153044d$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
|
'id': 'pbs-pub-e8713dac-899e-41de-9313-81269f4c04ac$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'De zwarte weduwe',
|
'title': 'Postbus X - Aflevering 1 (Seizoen 1989)',
|
||||||
'description': 'md5:db1227b0f318c849ba5eab1fef895ee4',
|
'description': 'md5:b704f669eb9262da4c55b33d7c6ed4b7',
|
||||||
'duration': 1457.04,
|
'duration': 1457.04,
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
'season': 'Season 1',
|
'series': 'Postbus X',
|
||||||
'season_number': 1,
|
'season': 'Seizoen 1989',
|
||||||
|
'season_number': 1989,
|
||||||
|
'episode': 'De zwarte weduwe',
|
||||||
'episode_number': 1,
|
'episode_number': 1,
|
||||||
|
'timestamp': 1595822400,
|
||||||
|
'upload_date': '20200727',
|
||||||
},
|
},
|
||||||
'skip': 'This video is only available for registered users',
|
'skip': 'This video is only available for registered users',
|
||||||
'params': {
|
'params': {
|
||||||
@@ -300,69 +312,73 @@ class VrtNUIE(GigyaBaseIE):
|
|||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
display_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
|
|
||||||
webpage, urlh = self._download_webpage_handle(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
attrs = extract_attributes(self._search_regex(
|
||||||
|
r'(<nui-media[^>]+>)', webpage, 'media element'))
|
||||||
|
video_id = attrs['videoid']
|
||||||
|
publication_id = attrs.get('publicationid')
|
||||||
|
if publication_id:
|
||||||
|
video_id = publication_id + '$' + video_id
|
||||||
|
|
||||||
|
page = (self._parse_json(self._search_regex(
|
||||||
|
r'digitalData\s*=\s*({.+?});', webpage, 'digial data',
|
||||||
|
default='{}'), video_id, fatal=False) or {}).get('page') or {}
|
||||||
|
|
||||||
info = self._search_json_ld(webpage, display_id, default={})
|
info = self._search_json_ld(webpage, display_id, default={})
|
||||||
|
|
||||||
# title is optional here since it may be extracted by extractor
|
|
||||||
# that is delegated from here
|
|
||||||
title = strip_or_none(self._html_search_regex(
|
|
||||||
r'(?ms)<h1 class="content__heading">(.+?)</h1>',
|
|
||||||
webpage, 'title', default=None))
|
|
||||||
|
|
||||||
description = self._html_search_regex(
|
|
||||||
r'(?ms)<div class="content__description">(.+?)</div>',
|
|
||||||
webpage, 'description', default=None)
|
|
||||||
|
|
||||||
season = self._html_search_regex(
|
|
||||||
[r'''(?xms)<div\ class="tabs__tab\ tabs__tab--active">\s*
|
|
||||||
<span>seizoen\ (.+?)</span>\s*
|
|
||||||
</div>''',
|
|
||||||
r'<option value="seizoen (\d{1,3})" data-href="[^"]+?" selected>'],
|
|
||||||
webpage, 'season', default=None)
|
|
||||||
|
|
||||||
season_number = int_or_none(season)
|
|
||||||
|
|
||||||
episode_number = int_or_none(self._html_search_regex(
|
|
||||||
r'''(?xms)<div\ class="content__episode">\s*
|
|
||||||
<abbr\ title="aflevering">afl</abbr>\s*<span>(\d+)</span>
|
|
||||||
</div>''',
|
|
||||||
webpage, 'episode_number', default=None))
|
|
||||||
|
|
||||||
release_date = parse_iso8601(self._html_search_regex(
|
|
||||||
r'(?ms)<div class="content__broadcastdate">\s*<time\ datetime="(.+?)"',
|
|
||||||
webpage, 'release_date', default=None))
|
|
||||||
|
|
||||||
# If there's a ? or a # in the URL, remove them and everything after
|
|
||||||
clean_url = urlh.geturl().split('?')[0].split('#')[0].strip('/')
|
|
||||||
securevideo_url = clean_url + '.mssecurevideo.json'
|
|
||||||
|
|
||||||
try:
|
|
||||||
video = self._download_json(securevideo_url, display_id)
|
|
||||||
except ExtractorError as e:
|
|
||||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
|
||||||
self.raise_login_required()
|
|
||||||
raise
|
|
||||||
|
|
||||||
# We are dealing with a '../<show>.relevant' URL
|
|
||||||
redirect_url = video.get('url')
|
|
||||||
if redirect_url:
|
|
||||||
return self.url_result(self._proto_relative_url(redirect_url, 'https:'))
|
|
||||||
|
|
||||||
# There is only one entry, but with an unknown key, so just get
|
|
||||||
# the first one
|
|
||||||
video_id = list(video.values())[0].get('videoid')
|
|
||||||
|
|
||||||
return merge_dicts(info, {
|
return merge_dicts(info, {
|
||||||
'_type': 'url_transparent',
|
'_type': 'url_transparent',
|
||||||
'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
|
'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
|
||||||
'ie_key': CanvasIE.ie_key(),
|
'ie_key': CanvasIE.ie_key(),
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
|
'season_number': int_or_none(page.get('episode_season')),
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
class DagelijkseKostIE(InfoExtractor):
|
||||||
|
IE_DESC = 'dagelijksekost.een.be'
|
||||||
|
_VALID_URL = r'https?://dagelijksekost\.een\.be/gerechten/(?P<id>[^/?#&]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://dagelijksekost.een.be/gerechten/hachis-parmentier-met-witloof',
|
||||||
|
'md5': '30bfffc323009a3e5f689bef6efa2365',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'md-ast-27a4d1ff-7d7b-425e-b84f-a4d227f592fa',
|
||||||
|
'display_id': 'hachis-parmentier-met-witloof',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Hachis parmentier met witloof',
|
||||||
|
'description': 'md5:9960478392d87f63567b5b117688cdc5',
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 283.02,
|
||||||
|
},
|
||||||
|
'expected_warnings': ['is not a supported codec'],
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
title = strip_or_none(get_element_by_class(
|
||||||
|
'dish-metadata__title', webpage
|
||||||
|
) or self._html_search_meta(
|
||||||
|
'twitter:title', webpage))
|
||||||
|
|
||||||
|
description = clean_html(get_element_by_class(
|
||||||
|
'dish-description', webpage)
|
||||||
|
) or self._html_search_meta(
|
||||||
|
('description', 'twitter:description', 'og:description'),
|
||||||
|
webpage)
|
||||||
|
|
||||||
|
video_id = self._html_search_regex(
|
||||||
|
r'data-url=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
|
||||||
|
group='id')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': 'https://mediazone.vrt.be/api/v1/dako/assets/%s' % video_id,
|
||||||
|
'ie_key': CanvasIE.ie_key(),
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': description,
|
'description': description,
|
||||||
'season': season,
|
}
|
||||||
'season_number': season_number,
|
|
||||||
'episode_number': episode_number,
|
|
||||||
'release_date': release_date,
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ class CBSBaseIE(ThePlatformFeedIE):
|
|||||||
|
|
||||||
|
|
||||||
class CBSIE(CBSBaseIE):
|
class CBSIE(CBSBaseIE):
|
||||||
_VALID_URL = r'(?:cbs:|https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/video|colbertlateshow\.com/(?:video|podcasts))/)(?P<id>[\w-]+)'
|
_VALID_URL = r'(?:cbs:|https?://(?:www\.)?(?:(?:cbs|paramountplus)\.com/shows/[^/]+/video|colbertlateshow\.com/(?:video|podcasts))/)(?P<id>[\w-]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
|
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
|
||||||
@@ -52,6 +52,9 @@ class CBSIE(CBSBaseIE):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
|
'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.paramountplus.com/shows/all-rise/video/QmR1WhNkh1a_IrdHZrbcRklm176X_rVc/all-rise-space/',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _extract_video_info(self, content_id, site='cbs', mpx_acc=2198311517):
|
def _extract_video_info(self, content_id, site='cbs', mpx_acc=2198311517):
|
||||||
|
|||||||
@@ -11,7 +11,47 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class CBSLocalIE(AnvatoIE):
|
class CBSLocalIE(AnvatoIE):
|
||||||
_VALID_URL = r'https?://[a-z]+\.cbslocal\.com/(?:\d+/\d+/\d+|video)/(?P<id>[0-9a-z-]+)'
|
_VALID_URL_BASE = r'https?://[a-z]+\.cbslocal\.com/'
|
||||||
|
_VALID_URL = _VALID_URL_BASE + r'video/(?P<id>\d+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://newyork.cbslocal.com/video/3580809-a-very-blue-anniversary/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3580809',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'A Very Blue Anniversary',
|
||||||
|
'description': 'CBS2’s Cindy Hsu has more.',
|
||||||
|
'thumbnail': 're:^https?://.*',
|
||||||
|
'timestamp': int,
|
||||||
|
'upload_date': r're:^\d{8}$',
|
||||||
|
'uploader': 'CBS',
|
||||||
|
'subtitles': {
|
||||||
|
'en': 'mincount:5',
|
||||||
|
},
|
||||||
|
'categories': [
|
||||||
|
'Stations\\Spoken Word\\WCBSTV',
|
||||||
|
'Syndication\\AOL',
|
||||||
|
'Syndication\\MSN',
|
||||||
|
'Syndication\\NDN',
|
||||||
|
'Syndication\\Yahoo',
|
||||||
|
'Content\\News',
|
||||||
|
'Content\\News\\Local News',
|
||||||
|
],
|
||||||
|
'tags': ['CBS 2 News Weekends', 'Cindy Hsu', 'Blue Man Group'],
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mcp_id = self._match_id(url)
|
||||||
|
return self.url_result(
|
||||||
|
'anvato:anvato_cbslocal_app_web_prod_547f3e49241ef0e5d30c79b2efbca5d92c698f67:' + mcp_id, 'Anvato', mcp_id)
|
||||||
|
|
||||||
|
|
||||||
|
class CBSLocalArticleIE(AnvatoIE):
|
||||||
|
_VALID_URL = CBSLocalIE._VALID_URL_BASE + r'\d+/\d+/\d+/(?P<id>[0-9a-z-]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# Anvato backend
|
# Anvato backend
|
||||||
@@ -52,31 +92,6 @@ class CBSLocalIE(AnvatoIE):
|
|||||||
# m3u8 download
|
# m3u8 download
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
}, {
|
|
||||||
'url': 'http://newyork.cbslocal.com/video/3580809-a-very-blue-anniversary/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '3580809',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'A Very Blue Anniversary',
|
|
||||||
'description': 'CBS2’s Cindy Hsu has more.',
|
|
||||||
'thumbnail': 're:^https?://.*',
|
|
||||||
'timestamp': int,
|
|
||||||
'upload_date': r're:^\d{8}$',
|
|
||||||
'uploader': 'CBS',
|
|
||||||
'subtitles': {
|
|
||||||
'en': 'mincount:5',
|
|
||||||
},
|
|
||||||
'categories': [
|
|
||||||
'Stations\\Spoken Word\\WCBSTV',
|
|
||||||
'Syndication\\AOL',
|
|
||||||
'Syndication\\MSN',
|
|
||||||
'Syndication\\NDN',
|
|
||||||
'Syndication\\Yahoo',
|
|
||||||
'Content\\News',
|
|
||||||
'Content\\News\\Local News',
|
|
||||||
],
|
|
||||||
'tags': ['CBS 2 News Weekends', 'Cindy Hsu', 'Blue Man Group'],
|
|
||||||
},
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ class CBSNewsEmbedIE(CBSIE):
|
|||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
item = self._parse_json(zlib.decompress(compat_b64decode(
|
item = self._parse_json(zlib.decompress(compat_b64decode(
|
||||||
compat_urllib_parse_unquote(self._match_id(url))),
|
compat_urllib_parse_unquote(self._match_id(url))),
|
||||||
-zlib.MAX_WBITS), None)['video']['items'][0]
|
-zlib.MAX_WBITS).decode('utf-8'), None)['video']['items'][0]
|
||||||
return self._extract_video_info(item['mpxRefId'], 'cbsnews')
|
return self._extract_video_info(item['mpxRefId'], 'cbsnews')
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,38 +1,113 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .cbs import CBSBaseIE
|
import re
|
||||||
|
|
||||||
|
# from .cbs import CBSBaseIE
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
try_get,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class CBSSportsIE(CBSBaseIE):
|
# class CBSSportsEmbedIE(CBSBaseIE):
|
||||||
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/(?:video|news)/(?P<id>[^/?#&]+)'
|
class CBSSportsEmbedIE(InfoExtractor):
|
||||||
|
IE_NAME = 'cbssports:embed'
|
||||||
|
_VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+?
|
||||||
|
(?:
|
||||||
|
ids%3D(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})|
|
||||||
|
pcid%3D(?P<pcid>\d+)
|
||||||
|
)'''
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.cbssports.com/nba/video/donovan-mitchell-flashes-star-potential-in-game-2-victory-over-thunder/',
|
'url': 'https://www.cbssports.com/player/embed/?args=player_id%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26ids%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26resizable%3D1%26autoplay%3Dtrue%26domain%3Dcbssports.com%26comp_ads_enabled%3Dfalse%26watchAndRead%3D0%26startTime%3D0%26env%3Dprod',
|
||||||
'info_dict': {
|
'only_matching': True,
|
||||||
'id': '1214315075735',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Donovan Mitchell flashes star potential in Game 2 victory over Thunder',
|
|
||||||
'description': 'md5:df6f48622612c2d6bd2e295ddef58def',
|
|
||||||
'timestamp': 1524111457,
|
|
||||||
'upload_date': '20180419',
|
|
||||||
'uploader': 'CBSI-NEW',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.cbssports.com/nba/news/nba-playoffs-2018-watch-76ers-vs-heat-game-3-series-schedule-tv-channel-online-stream/',
|
'url': 'https://embed.247sports.com/player/embed/?args=%3fplayer_id%3d1827823171591%26channel%3dcollege-football-recruiting%26pcid%3d1827823171591%26width%3d640%26height%3d360%26autoplay%3dTrue%26comp_ads_enabled%3dFalse%26uvpc%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_v4%2526partner%253d247%26uvpc_m%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_m_v4%2526partner_m%253d247_mobile%26utag%3d247sportssite%26resizable%3dTrue',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _extract_video_info(self, filter_query, video_id):
|
# def _extract_video_info(self, filter_query, video_id):
|
||||||
return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)
|
# return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
uuid, pcid = re.match(self._VALID_URL, url).groups()
|
||||||
|
query = {'id': uuid} if uuid else {'pcid': pcid}
|
||||||
|
video = self._download_json(
|
||||||
|
'https://www.cbssports.com/api/content/video/',
|
||||||
|
uuid or pcid, query=query)[0]
|
||||||
|
video_id = video['id']
|
||||||
|
title = video['title']
|
||||||
|
metadata = video.get('metaData') or {}
|
||||||
|
# return self._extract_video_info('byId=%d' % metadata['mpxOutletId'], video_id)
|
||||||
|
# return self._extract_video_info('byGuid=' + metadata['mpxRefId'], video_id)
|
||||||
|
|
||||||
|
formats = self._extract_m3u8_formats(
|
||||||
|
metadata['files'][0]['url'], video_id, 'mp4',
|
||||||
|
'm3u8_native', m3u8_id='hls', fatal=False)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
image = video.get('image')
|
||||||
|
thumbnails = None
|
||||||
|
if image:
|
||||||
|
image_path = image.get('path')
|
||||||
|
if image_path:
|
||||||
|
thumbnails = [{
|
||||||
|
'url': image_path,
|
||||||
|
'width': int_or_none(image.get('width')),
|
||||||
|
'height': int_or_none(image.get('height')),
|
||||||
|
'filesize': int_or_none(image.get('size')),
|
||||||
|
}]
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'formats': formats,
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
'description': video.get('description'),
|
||||||
|
'timestamp': int_or_none(try_get(video, lambda x: x['dateCreated']['epoch'])),
|
||||||
|
'duration': int_or_none(metadata.get('duration')),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class CBSSportsBaseIE(InfoExtractor):
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
display_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
video_id = self._search_regex(
|
iframe_url = self._search_regex(
|
||||||
[r'(?:=|%26)pcid%3D(\d+)', r'embedVideo(?:Container)?_(\d+)'],
|
r'<iframe[^>]+(?:data-)?src="(https?://[^/]+/player/embed[^"]+)"',
|
||||||
webpage, 'video id')
|
webpage, 'embed url')
|
||||||
return self._extract_video_info('byId=%s' % video_id, video_id)
|
return self.url_result(iframe_url, CBSSportsEmbedIE.ie_key())
|
||||||
|
|
||||||
|
|
||||||
|
class CBSSportsIE(CBSSportsBaseIE):
|
||||||
|
IE_NAME = 'cbssports'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.cbssports.com/college-football/video/cover-3-stanford-spring-gleaning/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b56c03a6-231a-4bbe-9c55-af3c8a8e9636',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Cover 3: Stanford Spring Gleaning',
|
||||||
|
'description': 'The Cover 3 crew break down everything you need to know about the Stanford Cardinal this spring.',
|
||||||
|
'timestamp': 1617218398,
|
||||||
|
'upload_date': '20210331',
|
||||||
|
'duration': 502,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
|
||||||
|
class TwentyFourSevenSportsIE(CBSSportsBaseIE):
|
||||||
|
IE_NAME = '247sports'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://247sports.com/Video/2021-QB-Jake-Garcia-senior-highlights-through-five-games-10084854/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '4f1265cb-c3b5-44a8-bb1d-1914119a0ccc',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '2021 QB Jake Garcia senior highlights through five games',
|
||||||
|
'description': 'md5:8cb67ebed48e2e6adac1701e0ff6e45b',
|
||||||
|
'timestamp': 1607114223,
|
||||||
|
'upload_date': '20201204',
|
||||||
|
'duration': 208,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|||||||
@@ -1,15 +1,18 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import calendar
|
||||||
|
import datetime
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
clean_html,
|
clean_html,
|
||||||
|
extract_timezone,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
parse_iso8601,
|
|
||||||
parse_resolution,
|
parse_resolution,
|
||||||
|
try_get,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -24,8 +27,9 @@ class CCMAIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'L\'espot de La Marató de TV3',
|
'title': 'L\'espot de La Marató de TV3',
|
||||||
'description': 'md5:f12987f320e2f6e988e9908e4fe97765',
|
'description': 'md5:f12987f320e2f6e988e9908e4fe97765',
|
||||||
'timestamp': 1470918540,
|
'timestamp': 1478608140,
|
||||||
'upload_date': '20160811',
|
'upload_date': '20161108',
|
||||||
|
'age_limit': 0,
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.ccma.cat/catradio/alacarta/programa/el-consell-de-savis-analitza-el-derbi/audio/943685/',
|
'url': 'http://www.ccma.cat/catradio/alacarta/programa/el-consell-de-savis-analitza-el-derbi/audio/943685/',
|
||||||
@@ -35,8 +39,24 @@ class CCMAIE(InfoExtractor):
|
|||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'title': 'El Consell de Savis analitza el derbi',
|
'title': 'El Consell de Savis analitza el derbi',
|
||||||
'description': 'md5:e2a3648145f3241cb9c6b4b624033e53',
|
'description': 'md5:e2a3648145f3241cb9c6b4b624033e53',
|
||||||
'upload_date': '20171205',
|
'upload_date': '20170512',
|
||||||
'timestamp': 1512507300,
|
'timestamp': 1494622500,
|
||||||
|
'vcodec': 'none',
|
||||||
|
'categories': ['Esports'],
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.ccma.cat/tv3/alacarta/crims/crims-josep-tallada-lespereu-me-capitol-1/video/6031387/',
|
||||||
|
'md5': 'b43c3d3486f430f3032b5b160d80cbc3',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '6031387',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Crims - Josep Talleda, l\'"Espereu-me" (capítol 1)',
|
||||||
|
'description': 'md5:7cbdafb640da9d0d2c0f62bad1e74e60',
|
||||||
|
'timestamp': 1582577700,
|
||||||
|
'upload_date': '20200224',
|
||||||
|
'subtitles': 'mincount:4',
|
||||||
|
'age_limit': 16,
|
||||||
|
'series': 'Crims',
|
||||||
}
|
}
|
||||||
}]
|
}]
|
||||||
|
|
||||||
@@ -72,17 +92,28 @@ class CCMAIE(InfoExtractor):
|
|||||||
|
|
||||||
informacio = media['informacio']
|
informacio = media['informacio']
|
||||||
title = informacio['titol']
|
title = informacio['titol']
|
||||||
durada = informacio.get('durada', {})
|
durada = informacio.get('durada') or {}
|
||||||
duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text'))
|
duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text'))
|
||||||
timestamp = parse_iso8601(informacio.get('data_emissio', {}).get('utc'))
|
tematica = try_get(informacio, lambda x: x['tematica']['text'])
|
||||||
|
|
||||||
|
timestamp = None
|
||||||
|
data_utc = try_get(informacio, lambda x: x['data_emissio']['utc'])
|
||||||
|
try:
|
||||||
|
timezone, data_utc = extract_timezone(data_utc)
|
||||||
|
timestamp = calendar.timegm((datetime.datetime.strptime(
|
||||||
|
data_utc, '%Y-%d-%mT%H:%M:%S') - timezone).timetuple())
|
||||||
|
except TypeError:
|
||||||
|
pass
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
subtitols = media.get('subtitols', {})
|
subtitols = media.get('subtitols') or []
|
||||||
if subtitols:
|
if isinstance(subtitols, dict):
|
||||||
sub_url = subtitols.get('url')
|
subtitols = [subtitols]
|
||||||
|
for st in subtitols:
|
||||||
|
sub_url = st.get('url')
|
||||||
if sub_url:
|
if sub_url:
|
||||||
subtitles.setdefault(
|
subtitles.setdefault(
|
||||||
subtitols.get('iso') or subtitols.get('text') or 'ca', []).append({
|
st.get('iso') or st.get('text') or 'ca', []).append({
|
||||||
'url': sub_url,
|
'url': sub_url,
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -97,6 +128,16 @@ class CCMAIE(InfoExtractor):
|
|||||||
'height': int_or_none(imatges.get('alcada')),
|
'height': int_or_none(imatges.get('alcada')),
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
age_limit = None
|
||||||
|
codi_etic = try_get(informacio, lambda x: x['codi_etic']['id'])
|
||||||
|
if codi_etic:
|
||||||
|
codi_etic_s = codi_etic.split('_')
|
||||||
|
if len(codi_etic_s) == 2:
|
||||||
|
if codi_etic_s[1] == 'TP':
|
||||||
|
age_limit = 0
|
||||||
|
else:
|
||||||
|
age_limit = int_or_none(codi_etic_s[1])
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': media_id,
|
'id': media_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
@@ -106,4 +147,9 @@ class CCMAIE(InfoExtractor):
|
|||||||
'thumbnails': thumbnails,
|
'thumbnails': thumbnails,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
|
'age_limit': age_limit,
|
||||||
|
'alt_title': informacio.get('titol_complet'),
|
||||||
|
'episode_number': int_or_none(informacio.get('capitol')),
|
||||||
|
'categories': [tematica] if tematica else None,
|
||||||
|
'series': informacio.get('programa'),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -95,8 +95,11 @@ class CDAIE(InfoExtractor):
         if 'Ten film jest dostępny dla użytkowników premium' in webpage:
             raise ExtractorError('This video is only available for premium users.', expected=True)

+        if re.search(r'niedostępn[ey] w(?: |\s+)Twoim kraju\s*<', webpage):
+            self.raise_geo_restricted()
+
         need_confirm_age = False
-        if self._html_search_regex(r'(<form[^>]+action="/a/validatebirth")',
+        if self._html_search_regex(r'(<form[^>]+action="[^"]*/a/validatebirth[^"]*")',
                                    webpage, 'birthday validate form', default=None):
             webpage = self._download_age_confirm_page(
                 url, video_id, note='Confirming age')
@@ -130,6 +133,8 @@ class CDAIE(InfoExtractor):
             'age_limit': 18 if need_confirm_age else 0,
         }

+        info = self._search_json_ld(webpage, video_id, default={})
+
         # Source: https://www.cda.pl/js/player.js?t=1606154898
         def decrypt_file(a):
             for p in ('_XDDD', '_CDA', '_ADC', '_CXD', '_QWE', '_Q5', '_IKSDE'):
@@ -194,7 +199,7 @@ class CDAIE(InfoExtractor):
             handler = self._download_webpage

             webpage = handler(
-                self._BASE_URL + href, video_id,
+                urljoin(self._BASE_URL, href), video_id,
                 'Downloading %s version information' % resolution, fatal=False)
             if not webpage:
                 # Manually report warning because empty page is returned when
@@ -206,6 +211,4 @@ class CDAIE(InfoExtractor):

         self._sort_formats(formats)

-        info = self._search_json_ld(webpage, video_id, default={})
-
         return merge_dicts(info_dict, info)
|
|||||||
@@ -96,7 +96,10 @@ class CNNIE(TurnerBaseIE):
             config['data_src'] % path, page_title, {
                 'default': {
                     'media_src': config['media_src'],
-                }
+                },
+                'f4m': {
+                    'host': 'cnn-vh.akamaihd.net',
+                },
             })

|
|||||||
@@ -1,142 +1,51 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .mtv import MTVServicesInfoExtractor
|
from .mtv import MTVServicesInfoExtractor
|
||||||
from .common import InfoExtractor
|
|
||||||
|
|
||||||
|
|
||||||
class ComedyCentralIE(MTVServicesInfoExtractor):
|
class ComedyCentralIE(MTVServicesInfoExtractor):
|
||||||
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
|
_VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?)/(?P<id>[0-9a-z]{6})'
|
||||||
(video-clips|episodes|cc-studios|video-collections|shows(?=/[^/]+/(?!full-episodes)))
|
|
||||||
/(?P<title>.*)'''
|
|
||||||
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
|
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
|
'url': 'http://www.cc.com/video-clips/5ke9v2/the-daily-show-with-trevor-noah-doc-rivers-and-steve-ballmer---the-nba-player-strike',
|
||||||
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
|
'md5': 'b8acb347177c680ff18a292aa2166f80',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
|
'id': '89ccc86e-1b02-4f83-b0c9-1d9592ecd025',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'CC:Stand-Up|August 18, 2013|1|0101|Uncensored - Too Good of a Mother',
|
'title': 'The Daily Show with Trevor Noah|August 28, 2020|25|25149|Doc Rivers and Steve Ballmer - The NBA Player Strike',
|
||||||
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
|
'description': 'md5:5334307c433892b85f4f5e5ac9ef7498',
|
||||||
'timestamp': 1376798400,
|
'timestamp': 1598670000,
|
||||||
'upload_date': '20130818',
|
'upload_date': '20200829',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/interviews/6yx39d/exclusive-rand-paul-extended-interview',
|
'url': 'http://www.cc.com/episodes/pnzzci/drawn-together--american-idol--parody-clip-show-season-3-ep-314',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
|
||||||
|
|
||||||
|
|
||||||
class ComedyCentralFullEpisodesIE(MTVServicesInfoExtractor):
|
|
||||||
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
|
|
||||||
(?:full-episodes|shows(?=/[^/]+/full-episodes))
|
|
||||||
/(?P<id>[^?]+)'''
|
|
||||||
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
|
|
||||||
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.cc.com/full-episodes/pv391a/the-daily-show-with-trevor-noah-november-28--2016---ryan-speedo-green-season-22-ep-22028',
|
|
||||||
'info_dict': {
|
|
||||||
'description': 'Donald Trump is accused of exploiting his president-elect status for personal gain, Cuban leader Fidel Castro dies, and Ryan Speedo Green discusses "Sing for Your Life."',
|
|
||||||
'title': 'November 28, 2016 - Ryan Speedo Green',
|
|
||||||
},
|
|
||||||
'playlist_count': 4,
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
'url': 'https://www.cc.com/video/k3sdvm/the-daily-show-with-jon-stewart-exclusive-the-fourth-estate',
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
playlist_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(url, playlist_id)
|
|
||||||
mgid = self._extract_triforce_mgid(webpage, data_zone='t2_lc_promo1')
|
|
||||||
videos_info = self._get_videos_info(mgid)
|
|
||||||
return videos_info
|
|
||||||
|
|
||||||
|
|
||||||
class ToshIE(MTVServicesInfoExtractor):
|
|
||||||
IE_DESC = 'Tosh.0'
|
|
||||||
_VALID_URL = r'^https?://tosh\.cc\.com/video-(?:clips|collections)/[^/]+/(?P<videotitle>[^/?#]+)'
|
|
||||||
_FEED_URL = 'http://tosh.cc.com/feeds/mrss'
|
|
||||||
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://tosh.cc.com/video-clips/68g93d/twitter-users-share-summer-plans',
|
|
||||||
'info_dict': {
|
|
||||||
'description': 'Tosh asked fans to share their summer plans.',
|
|
||||||
'title': 'Twitter Users Share Summer Plans',
|
|
||||||
},
|
|
||||||
'playlist': [{
|
|
||||||
'md5': 'f269e88114c1805bb6d7653fecea9e06',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '90498ec2-ed00-11e0-aca6-0026b9414f30',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Tosh.0|June 9, 2077|2|211|Twitter Users Share Summer Plans',
|
|
||||||
'description': 'Tosh asked fans to share their summer plans.',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg',
|
|
||||||
# It's really reported to be published on year 2077
|
|
||||||
'upload_date': '20770610',
|
|
||||||
'timestamp': 3390510600,
|
|
||||||
'subtitles': {
|
|
||||||
'en': 'mincount:3',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
}, {
|
|
||||||
'url': 'http://tosh.cc.com/video-collections/x2iz7k/just-plain-foul/m5q4fp',
|
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
||||||
class ComedyCentralTVIE(MTVServicesInfoExtractor):
|
class ComedyCentralTVIE(MTVServicesInfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/(?:staffeln|shows)/(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/folgen/(?P<id>[0-9a-z]{6})'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.comedycentral.tv/staffeln/7436-the-mindy-project-staffel-4',
|
'url': 'https://www.comedycentral.tv/folgen/pxdpec/josh-investigates-klimawandel-staffel-1-ep-1',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'local_playlist-f99b626bdfe13568579a',
|
'id': '15907dc3-ec3c-11e8-a442-0e40cf2fc285',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'Episode_the-mindy-project_shows_season-4_episode-3_full-episode_part1',
|
'title': 'Josh Investigates',
|
||||||
|
'description': 'Steht uns das Ende der Welt bevor?',
|
||||||
},
|
},
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.comedycentral.tv/shows/1074-workaholics',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.comedycentral.tv/shows/1727-the-mindy-project/bonus',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
}]
|
||||||
|
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
|
||||||
|
_GEO_COUNTRIES = ['DE']
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _get_feed_query(self, uri):
|
||||||
video_id = self._match_id(url)
|
return {
|
||||||
|
'accountOverride': 'intl.mtvi.com',
|
||||||
webpage = self._download_webpage(url, video_id)
|
'arcEp': 'web.cc.tv',
|
||||||
|
'ep': 'b9032c3a',
|
||||||
mrss_url = self._search_regex(
|
'imageEp': 'web.cc.tv',
|
||||||
r'data-mrss=(["\'])(?P<url>(?:(?!\1).)+)\1',
|
'mgid': uri,
|
||||||
webpage, 'mrss url', group='url')
|
|
||||||
|
|
||||||
return self._get_videos_info_from_url(mrss_url, video_id)
|
|
||||||
|
|
||||||
|
|
||||||
class ComedyCentralShortnameIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'^:(?P<id>tds|thedailyshow|theopposition)$'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': ':tds',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': ':thedailyshow',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': ':theopposition',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
shortcut_map = {
|
|
||||||
'tds': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
|
||||||
'thedailyshow': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
|
|
||||||
'theopposition': 'http://www.cc.com/shows/the-opposition-with-jordan-klepper/full-episodes',
|
|
||||||
}
|
}
|
||||||
return self.url_result(shortcut_map[video_id])
|
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ import math

 from ..compat import (
     compat_cookiejar_Cookie,
-    compat_cookies,
+    compat_cookies_SimpleCookie,
     compat_etree_Element,
     compat_etree_fromstring,
     compat_getpass,
@@ -230,8 +230,10 @@ class InfoExtractor(object):
     uploader:       Full name of the video uploader.
     license:        License name the video is licensed under.
     creator:        The creator of the video.
+    release_timestamp: UNIX timestamp of the moment the video was released.
     release_date:   The date (YYYYMMDD) when the video was released.
-    timestamp:      UNIX timestamp of the moment the video became available.
+    timestamp:      UNIX timestamp of the moment the video became available
+                    (uploaded).
     upload_date:    Video upload date (YYYYMMDD).
                     If not explicitly set, calculated from timestamp.
     uploader_id:    Nickname or id of the video uploader.
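For illustration only (not part of the diff): release_date is presumably related to release_timestamp the same way upload_date is derived from timestamp, i.e. a YYYYMMDD string computed from the UNIX time.

import datetime

# Hypothetical value; shows the YYYYMMDD derivation described in the docstring above.
release_timestamp = 1614556800
release_date = datetime.datetime.utcfromtimestamp(release_timestamp).strftime('%Y%m%d')
print(release_date)  # 20210301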
@@ -336,8 +338,8 @@ class InfoExtractor(object):
     object, each element of which is a valid dictionary by this specification.

     Additionally, playlists can have "id", "title", "description", "uploader",
-    "uploader_id", "uploader_url" attributes with the same semantics as videos
-    (see above).
+    "uploader_id", "uploader_url", "duration" attributes with the same semantics
+    as videos (see above).


     _type "multi_video" indicates that there are multiple videos that
@@ -1237,8 +1239,16 @@ class InfoExtractor(object):
             'ViewAction': 'view',
         }

+        def extract_interaction_type(e):
+            interaction_type = e.get('interactionType')
+            if isinstance(interaction_type, dict):
+                interaction_type = interaction_type.get('@type')
+            return str_or_none(interaction_type)
+
         def extract_interaction_statistic(e):
             interaction_statistic = e.get('interactionStatistic')
+            if isinstance(interaction_statistic, dict):
+                interaction_statistic = [interaction_statistic]
             if not isinstance(interaction_statistic, list):
                 return
             for is_e in interaction_statistic:
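A minimal standalone sketch (illustrative data, plain Python) of the normalisation these helpers perform: a lone interactionStatistic dict is wrapped in a list, and interactionType may be either a plain string or a nested object carrying '@type'.

def interaction_type_of(entry):
    # Mirrors extract_interaction_type(): accept a string or an {'@type': ...} dict.
    interaction_type = entry.get('interactionType')
    if isinstance(interaction_type, dict):
        interaction_type = interaction_type.get('@type')
    return interaction_type if isinstance(interaction_type, str) else None

# Illustrative JSON-LD fragment, not taken from any real page.
ld = {'interactionStatistic': {
    '@type': 'InteractionCounter',
    'interactionType': {'@type': 'http://schema.org/WatchAction'},
    'userInteractionCount': '2,000',
}}
stats = ld['interactionStatistic']
if isinstance(stats, dict):  # single counter given as an object, not a list
    stats = [stats]
for entry in stats:
    print(interaction_type_of(entry), entry.get('userInteractionCount'))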
@@ -1246,8 +1256,8 @@ class InfoExtractor(object):
                     continue
                 if is_e.get('@type') != 'InteractionCounter':
                     continue
-                interaction_type = is_e.get('interactionType')
-                if not isinstance(interaction_type, compat_str):
+                interaction_type = extract_interaction_type(is_e)
+                if not interaction_type:
                     continue
                 # For interaction count some sites provide string instead of
                 # an integer (as per spec) with non digit characters (e.g. ",")
@@ -1265,6 +1275,7 @@ class InfoExtractor(object):

         def extract_video_object(e):
             assert e['@type'] == 'VideoObject'
+            author = e.get('author')
             info.update({
                 'url': url_or_none(e.get('contentUrl')),
                 'title': unescapeHTML(e.get('name')),
@@ -1272,7 +1283,11 @@ class InfoExtractor(object):
                 'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
                 'duration': parse_duration(e.get('duration')),
                 'timestamp': unified_timestamp(e.get('uploadDate')),
-                'uploader': str_or_none(e.get('author')),
+                # author can be an instance of 'Organization' or 'Person' types.
+                # both types can have 'name' property(inherited from 'Thing' type). [1]
+                # however some websites are using 'Text' type instead.
+                # 1. https://schema.org/VideoObject
+                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
                 'filesize': float_or_none(e.get('contentSize')),
                 'tbr': int_or_none(e.get('bitrate')),
                 'width': int_or_none(e.get('width')),
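A standalone sketch of the author handling added above: schema.org allows author to be a Person/Organization object (with a 'name') or plain text, so both shapes map to the same uploader string. Values are illustrative only.

def uploader_from_author(author):
    # Object form: take the 'name' property; text form: use the string as-is.
    if isinstance(author, dict):
        return author.get('name')
    return author if isinstance(author, str) else None

print(uploader_from_author({'@type': 'Person', 'name': 'Jane Doe'}))  # Jane Doe
print(uploader_from_author('Jane Doe'))                               # Jane Doe
print(uploader_from_author(None))                                     # None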
@@ -2056,7 +2071,7 @@ class InfoExtractor(object):
|
|||||||
})
|
})
|
||||||
return entries
|
return entries
|
||||||
|
|
||||||
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
|
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
|
||||||
res = self._download_xml_handle(
|
res = self._download_xml_handle(
|
||||||
mpd_url, video_id,
|
mpd_url, video_id,
|
||||||
note=note or 'Downloading MPD manifest',
|
note=note or 'Downloading MPD manifest',
|
||||||
@@ -2070,10 +2085,9 @@ class InfoExtractor(object):
|
|||||||
mpd_base_url = base_url(urlh.geturl())
|
mpd_base_url = base_url(urlh.geturl())
|
||||||
|
|
||||||
return self._parse_mpd_formats(
|
return self._parse_mpd_formats(
|
||||||
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
|
mpd_doc, mpd_id, mpd_base_url, mpd_url)
|
||||||
formats_dict=formats_dict, mpd_url=mpd_url)
|
|
||||||
|
|
||||||
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
|
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
|
||||||
"""
|
"""
|
||||||
Parse formats from MPD manifest.
|
Parse formats from MPD manifest.
|
||||||
References:
|
References:
|
||||||
@@ -2351,15 +2365,7 @@ class InfoExtractor(object):
|
|||||||
else:
|
else:
|
||||||
# Assuming direct URL to unfragmented media.
|
# Assuming direct URL to unfragmented media.
|
||||||
f['url'] = base_url
|
f['url'] = base_url
|
||||||
|
formats.append(f)
|
||||||
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
|
|
||||||
# is not necessarily unique within a Period thus formats with
|
|
||||||
# the same `format_id` are quite possible. There are numerous examples
|
|
||||||
# of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
|
|
||||||
# https://github.com/ytdl-org/youtube-dl/issues/13919)
|
|
||||||
full_info = formats_dict.get(representation_id, {}).copy()
|
|
||||||
full_info.update(f)
|
|
||||||
formats.append(full_info)
|
|
||||||
else:
|
else:
|
||||||
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
|
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
|
||||||
return formats
|
return formats
|
||||||
@@ -2597,6 +2603,13 @@ class InfoExtractor(object):
         return entries

     def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
+        signed = 'hdnea=' in manifest_url
+        if not signed:
+            # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
+            manifest_url = re.sub(
+                r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
+                '', manifest_url).strip('?')
+
         formats = []

         hdcore_sign = 'hdcore=3.7.0'
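For reference, a self-contained sketch of the clean-up applied above to unsigned Akamai manifest URLs (the URL is a made-up example): the b=, __a__/attributes=off and __b__= delivery attributes are stripped while other query parameters are kept.

import re

manifest_url = 'https://example.akamaihd.net/i/video/master.m3u8?b=100-1000&__a__=off&__b__=150&hdcore=3.7.0'
signed = 'hdnea=' in manifest_url
if not signed:
    # Same substitution as in the diff above.
    manifest_url = re.sub(
        r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
        '', manifest_url).strip('?')
print(manifest_url)  # https://example.akamaihd.net/i/video/master.m3u8?hdcore=3.7.0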
@@ -2622,7 +2635,7 @@ class InfoExtractor(object):
             formats.extend(m3u8_formats)

         http_host = hosts.get('http')
-        if http_host and m3u8_formats and 'hdnea=' not in m3u8_url:
+        if http_host and m3u8_formats and not signed:
             REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
             qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
             qualities_length = len(qualities)
@@ -2888,10 +2901,10 @@ class InfoExtractor(object):
             self._downloader.cookiejar.set_cookie(cookie)

     def _get_cookies(self, url):
-        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
+        """ Return a compat_cookies_SimpleCookie with the cookies for the url """
         req = sanitized_Request(url)
         self._downloader.cookiejar.add_cookie_header(req)
-        return compat_cookies.SimpleCookie(req.get_header('Cookie'))
+        return compat_cookies_SimpleCookie(req.get_header('Cookie'))

     def _apply_first_set_cookie_header(self, url_handle, cookie):
         """
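A rough Python 3 equivalent of _get_cookies() using only the standard library, as a sketch of the mechanism (the compat alias above simply points at the stdlib SimpleCookie): build a SimpleCookie from whatever the cookiejar would send for a given URL.

from http.cookies import SimpleCookie
from http.cookiejar import CookieJar
from urllib.request import Request

cookiejar = CookieJar()                      # normally pre-populated by earlier responses
req = Request('https://example.com/')
cookiejar.add_cookie_header(req)             # writes the Cookie header the jar would send
cookies = SimpleCookie(req.get_header('Cookie', ''))
print(dict((k, v.value) for k, v in cookies.items()))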
|
|||||||
148
youtube_dl/extractor/cpac.py
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_str
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
str_or_none,
|
||||||
|
try_get,
|
||||||
|
unified_timestamp,
|
||||||
|
update_url_query,
|
||||||
|
urljoin,
|
||||||
|
)
|
||||||
|
|
||||||
|
# compat_range
|
||||||
|
try:
|
||||||
|
if callable(xrange):
|
||||||
|
range = xrange
|
||||||
|
except (NameError, TypeError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class CPACIE(InfoExtractor):
|
||||||
|
IE_NAME = 'cpac'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?cpac\.ca/(?P<fr>l-)?episode\?id=(?P<id>[\da-f]{8}(?:-[\da-f]{4}){3}-[\da-f]{12})'
|
||||||
|
_TEST = {
|
||||||
|
# 'url': 'http://www.cpac.ca/en/programs/primetime-politics/episodes/65490909',
|
||||||
|
'url': 'https://www.cpac.ca/episode?id=fc7edcae-4660-47e1-ba61-5b7f29a9db0f',
|
||||||
|
'md5': 'e46ad699caafd7aa6024279f2614e8fa',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'fc7edcae-4660-47e1-ba61-5b7f29a9db0f',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'upload_date': '20220215',
|
||||||
|
'title': 'News Conference to Celebrate National Kindness Week – February 15, 2022',
|
||||||
|
'description': 'md5:466a206abd21f3a6f776cdef290c23fb',
|
||||||
|
'timestamp': 1644901200,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'format': 'bestvideo',
|
||||||
|
'hls_prefer_native': True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
url_lang = 'fr' if '/l-episode?' in url else 'en'
|
||||||
|
|
||||||
|
content = self._download_json(
|
||||||
|
'https://www.cpac.ca/api/1/services/contentModel.json?url=/site/website/episode/index.xml&crafterSite=cpacca&id=' + video_id,
|
||||||
|
video_id)
|
||||||
|
video_url = try_get(content, lambda x: x['page']['details']['videoUrl'], compat_str)
|
||||||
|
formats = []
|
||||||
|
if video_url:
|
||||||
|
content = content['page']
|
||||||
|
title = str_or_none(content['details']['title_%s_t' % (url_lang, )])
|
||||||
|
formats = self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', ext='mp4')
|
||||||
|
for fmt in formats:
|
||||||
|
# prefer language to match URL
|
||||||
|
fmt_lang = fmt.get('language')
|
||||||
|
if fmt_lang == url_lang:
|
||||||
|
fmt['language_preference'] = 10
|
||||||
|
elif not fmt_lang:
|
||||||
|
fmt['language_preference'] = -1
|
||||||
|
else:
|
||||||
|
fmt['language_preference'] = -10
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
category = str_or_none(content['details']['category_%s_t' % (url_lang, )])
|
||||||
|
|
||||||
|
def is_live(v_type):
|
||||||
|
return (v_type == 'live') if v_type is not None else None
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'formats': formats,
|
||||||
|
'title': title,
|
||||||
|
'description': str_or_none(content['details'].get('description_%s_t' % (url_lang, ))),
|
||||||
|
'timestamp': unified_timestamp(content['details'].get('liveDateTime')),
|
||||||
|
'category': [category] if category else None,
|
||||||
|
'thumbnail': urljoin(url, str_or_none(content['details'].get('image_%s_s' % (url_lang, )))),
|
||||||
|
'is_live': is_live(content['details'].get('type')),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class CPACPlaylistIE(InfoExtractor):
|
||||||
|
IE_NAME = 'cpac:playlist'
|
||||||
|
_VALID_URL = r'(?i)https?://(?:www\.)?cpac\.ca/(?:program|search|(?P<fr>emission|rechercher))\?(?:[^&]+&)*?(?P<id>(?:id=\d+|programId=\d+|key=[^&]+))'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.cpac.ca/program?id=6',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'id=6',
|
||||||
|
'title': 'Headline Politics',
|
||||||
|
'description': 'Watch CPAC’s signature long-form coverage of the day’s pressing political events as they unfold.',
|
||||||
|
},
|
||||||
|
'playlist_count': 10,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.cpac.ca/search?key=hudson&type=all&order=desc',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'key=hudson',
|
||||||
|
'title': 'hudson',
|
||||||
|
},
|
||||||
|
'playlist_count': 22,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.cpac.ca/search?programId=50',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'programId=50',
|
||||||
|
'title': '50',
|
||||||
|
},
|
||||||
|
'playlist_count': 9,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.cpac.ca/emission?id=6',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.cpac.ca/rechercher?key=hudson&type=all&order=desc',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
url_lang = 'fr' if any(x in url for x in ('/emission?', '/rechercher?')) else 'en'
|
||||||
|
pl_type, list_type = ('program', 'itemList') if any(x in url for x in ('/program?', '/emission?')) else ('search', 'searchResult')
|
||||||
|
api_url = (
|
||||||
|
'https://www.cpac.ca/api/1/services/contentModel.json?url=/site/website/%s/index.xml&crafterSite=cpacca&%s'
|
||||||
|
% (pl_type, video_id, ))
|
||||||
|
content = self._download_json(api_url, video_id)
|
||||||
|
entries = []
|
||||||
|
total_pages = int_or_none(try_get(content, lambda x: x['page'][list_type]['totalPages']), default=1)
|
||||||
|
for page in range(1, total_pages + 1):
|
||||||
|
if page > 1:
|
||||||
|
api_url = update_url_query(api_url, {'page': '%d' % (page, ), })
|
||||||
|
content = self._download_json(
|
||||||
|
api_url, video_id,
|
||||||
|
note='Downloading continuation - %d' % (page, ),
|
||||||
|
fatal=False)
|
||||||
|
|
||||||
|
for item in try_get(content, lambda x: x['page'][list_type]['item'], list) or []:
|
||||||
|
episode_url = urljoin(url, try_get(item, lambda x: x['url_%s_s' % (url_lang, )]))
|
||||||
|
if episode_url:
|
||||||
|
entries.append(episode_url)
|
||||||
|
|
||||||
|
return self.playlist_result(
|
||||||
|
(self.url_result(entry) for entry in entries),
|
||||||
|
playlist_id=video_id,
|
||||||
|
playlist_title=try_get(content, lambda x: x['page']['program']['title_%s_t' % (url_lang, )]) or video_id.split('=')[-1],
|
||||||
|
playlist_description=try_get(content, lambda x: x['page']['program']['description_%s_t' % (url_lang, )]),
|
||||||
|
)
|
||||||
@@ -8,11 +8,14 @@ from ..utils import (
|
|||||||
ExtractorError,
|
ExtractorError,
|
||||||
extract_attributes,
|
extract_attributes,
|
||||||
find_xpath_attr,
|
find_xpath_attr,
|
||||||
|
get_element_by_attribute,
|
||||||
get_element_by_class,
|
get_element_by_class,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
js_to_json,
|
js_to_json,
|
||||||
merge_dicts,
|
merge_dicts,
|
||||||
|
parse_iso8601,
|
||||||
smuggle_url,
|
smuggle_url,
|
||||||
|
str_to_int,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
)
|
)
|
||||||
from .senateisvp import SenateISVPIE
|
from .senateisvp import SenateISVPIE
|
||||||
@@ -116,8 +119,30 @@ class CSpanIE(InfoExtractor):
|
|||||||
jwsetup, video_id, require_title=False, m3u8_id='hls',
|
jwsetup, video_id, require_title=False, m3u8_id='hls',
|
||||||
base_url=url)
|
base_url=url)
|
||||||
add_referer(info['formats'])
|
add_referer(info['formats'])
|
||||||
|
for subtitles in info['subtitles'].values():
|
||||||
|
for subtitle in subtitles:
|
||||||
|
ext = determine_ext(subtitle['url'])
|
||||||
|
if ext == 'php':
|
||||||
|
ext = 'vtt'
|
||||||
|
subtitle['ext'] = ext
|
||||||
ld_info = self._search_json_ld(webpage, video_id, default={})
|
ld_info = self._search_json_ld(webpage, video_id, default={})
|
||||||
return merge_dicts(info, ld_info)
|
title = get_element_by_class('video-page-title', webpage) or \
|
||||||
|
self._og_search_title(webpage)
|
||||||
|
description = get_element_by_attribute('itemprop', 'description', webpage) or \
|
||||||
|
self._html_search_meta(['og:description', 'description'], webpage)
|
||||||
|
return merge_dicts(info, ld_info, {
|
||||||
|
'title': title,
|
||||||
|
'thumbnail': get_element_by_attribute('itemprop', 'thumbnailUrl', webpage),
|
||||||
|
'description': description,
|
||||||
|
'timestamp': parse_iso8601(get_element_by_attribute('itemprop', 'uploadDate', webpage)),
|
||||||
|
'location': get_element_by_attribute('itemprop', 'contentLocation', webpage),
|
||||||
|
'duration': int_or_none(self._search_regex(
|
||||||
|
r'jwsetup\.seclength\s*=\s*(\d+);',
|
||||||
|
webpage, 'duration', fatal=False)),
|
||||||
|
'view_count': str_to_int(self._search_regex(
|
||||||
|
r"<span[^>]+class='views'[^>]*>([\d,]+)\s+Views</span>",
|
||||||
|
webpage, 'views', fatal=False)),
|
||||||
|
})
|
||||||
|
|
||||||
# Obsolete
|
# Obsolete
|
||||||
# We first look for clipid, because clipprog always appears before
|
# We first look for clipid, because clipprog always appears before
|
||||||
|
|||||||
52
youtube_dl/extractor/ctv.py
Normal file
52
youtube_dl/extractor/ctv.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
|
class CTVIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?ctv\.ca/(?P<id>(?:show|movie)s/[^/]+/[^/?#&]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.ctv.ca/shows/your-morning/wednesday-december-23-2020-s5e88',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2102249',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Wednesday, December 23, 2020',
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'description': 'Your Morning delivers original perspectives and unique insights into the headlines of the day.',
|
||||||
|
'timestamp': 1608732000,
|
||||||
|
'upload_date': '20201223',
|
||||||
|
'series': 'Your Morning',
|
||||||
|
'season': '2020-2021',
|
||||||
|
'season_number': 5,
|
||||||
|
'episode_number': 88,
|
||||||
|
'tags': ['Your Morning'],
|
||||||
|
'categories': ['Talk Show'],
|
||||||
|
'duration': 7467.126,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.ctv.ca/movies/adam-sandlers-eight-crazy-nights/adam-sandlers-eight-crazy-nights',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
content = self._download_json(
|
||||||
|
'https://www.ctv.ca/space-graphql/graphql', display_id, query={
|
||||||
|
'query': '''{
|
||||||
|
resolvedPath(path: "/%s") {
|
||||||
|
lastSegment {
|
||||||
|
content {
|
||||||
|
... on AxisContent {
|
||||||
|
axisId
|
||||||
|
videoPlayerDestCode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}''' % display_id,
|
||||||
|
})['data']['resolvedPath']['lastSegment']['content']
|
||||||
|
video_id = content['axisId']
|
||||||
|
return self.url_result(
|
||||||
|
'9c9media:%s:%s' % (content['videoPlayerDestCode'], video_id),
|
||||||
|
'NineCNineMedia', video_id)
|
||||||
@@ -25,12 +25,12 @@ class CuriosityStreamBaseIE(InfoExtractor):
|
|||||||
raise ExtractorError(
|
raise ExtractorError(
|
||||||
'%s said: %s' % (self.IE_NAME, error), expected=True)
|
'%s said: %s' % (self.IE_NAME, error), expected=True)
|
||||||
|
|
||||||
def _call_api(self, path, video_id):
|
def _call_api(self, path, video_id, query=None):
|
||||||
headers = {}
|
headers = {}
|
||||||
if self._auth_token:
|
if self._auth_token:
|
||||||
headers['X-Auth-Token'] = self._auth_token
|
headers['X-Auth-Token'] = self._auth_token
|
||||||
result = self._download_json(
|
result = self._download_json(
|
||||||
self._API_BASE_URL + path, video_id, headers=headers)
|
self._API_BASE_URL + path, video_id, headers=headers, query=query)
|
||||||
self._handle_errors(result)
|
self._handle_errors(result)
|
||||||
return result['data']
|
return result['data']
|
||||||
|
|
||||||
@@ -52,62 +52,75 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
|
|||||||
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'https://app.curiositystream.com/video/2',
|
'url': 'https://app.curiositystream.com/video/2',
|
||||||
'md5': '262bb2f257ff301115f1973540de8983',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2',
|
'id': '2',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'How Did You Develop The Internet?',
|
'title': 'How Did You Develop The Internet?',
|
||||||
'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.',
|
'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.',
|
||||||
}
|
},
|
||||||
|
'params': {
|
||||||
|
'format': 'bestvideo',
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
media = self._call_api('media/' + video_id, video_id)
|
|
||||||
title = media['title']
|
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for encoding in media.get('encodings', []):
|
for encoding_format in ('m3u8', 'mpd'):
|
||||||
m3u8_url = encoding.get('master_playlist_url')
|
media = self._call_api('media/' + video_id, video_id, query={
|
||||||
if m3u8_url:
|
'encodingsNew': 'true',
|
||||||
formats.extend(self._extract_m3u8_formats(
|
'encodingsFormat': encoding_format,
|
||||||
m3u8_url, video_id, 'mp4', 'm3u8_native',
|
})
|
||||||
m3u8_id='hls', fatal=False))
|
for encoding in media.get('encodings', []):
|
||||||
encoding_url = encoding.get('url')
|
playlist_url = encoding.get('master_playlist_url')
|
||||||
file_url = encoding.get('file_url')
|
if encoding_format == 'm3u8':
|
||||||
if not encoding_url and not file_url:
|
# use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol
|
||||||
continue
|
formats.extend(self._extract_m3u8_formats(
|
||||||
f = {
|
playlist_url, video_id, 'mp4',
|
||||||
'width': int_or_none(encoding.get('width')),
|
m3u8_id='hls', fatal=False))
|
||||||
'height': int_or_none(encoding.get('height')),
|
elif encoding_format == 'mpd':
|
||||||
'vbr': int_or_none(encoding.get('video_bitrate')),
|
formats.extend(self._extract_mpd_formats(
|
||||||
'abr': int_or_none(encoding.get('audio_bitrate')),
|
playlist_url, video_id, mpd_id='dash', fatal=False))
|
||||||
'filesize': int_or_none(encoding.get('size_in_bytes')),
|
encoding_url = encoding.get('url')
|
||||||
'vcodec': encoding.get('video_codec'),
|
file_url = encoding.get('file_url')
|
||||||
'acodec': encoding.get('audio_codec'),
|
if not encoding_url and not file_url:
|
||||||
'container': encoding.get('container_type'),
|
|
||||||
}
|
|
||||||
for f_url in (encoding_url, file_url):
|
|
||||||
if not f_url:
|
|
||||||
continue
|
continue
|
||||||
fmt = f.copy()
|
f = {
|
||||||
rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', f_url)
|
'width': int_or_none(encoding.get('width')),
|
||||||
if rtmp:
|
'height': int_or_none(encoding.get('height')),
|
||||||
fmt.update({
|
'vbr': int_or_none(encoding.get('video_bitrate')),
|
||||||
'url': rtmp.group('url'),
|
'abr': int_or_none(encoding.get('audio_bitrate')),
|
||||||
'play_path': rtmp.group('playpath'),
|
'filesize': int_or_none(encoding.get('size_in_bytes')),
|
||||||
'app': rtmp.group('app'),
|
'vcodec': encoding.get('video_codec'),
|
||||||
'ext': 'flv',
|
'acodec': encoding.get('audio_codec'),
|
||||||
'format_id': 'rtmp',
|
'container': encoding.get('container_type'),
|
||||||
})
|
}
|
||||||
else:
|
for f_url in (encoding_url, file_url):
|
||||||
fmt.update({
|
if not f_url:
|
||||||
'url': f_url,
|
continue
|
||||||
'format_id': 'http',
|
fmt = f.copy()
|
||||||
})
|
rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', f_url)
|
||||||
formats.append(fmt)
|
if rtmp:
|
||||||
|
fmt.update({
|
||||||
|
'url': rtmp.group('url'),
|
||||||
|
'play_path': rtmp.group('playpath'),
|
||||||
|
'app': rtmp.group('app'),
|
||||||
|
'ext': 'flv',
|
||||||
|
'format_id': 'rtmp',
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
fmt.update({
|
||||||
|
'url': f_url,
|
||||||
|
'format_id': 'http',
|
||||||
|
})
|
||||||
|
formats.append(fmt)
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
title = media['title']
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
for closed_caption in media.get('closed_captions', []):
|
for closed_caption in media.get('closed_captions', []):
|
||||||
sub_url = closed_caption.get('file')
|
sub_url = closed_caption.get('file')
|
||||||
@@ -132,7 +145,7 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
|
|||||||
|
|
||||||
class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
|
class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
|
||||||
IE_NAME = 'curiositystream:collection'
|
IE_NAME = 'curiositystream:collection'
|
||||||
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/(?:collection|series)/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/(?:collections?|series)/(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://app.curiositystream.com/collection/2',
|
'url': 'https://app.curiositystream.com/collection/2',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -140,10 +153,13 @@ class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
|
|||||||
'title': 'Curious Minds: The Internet',
|
'title': 'Curious Minds: The Internet',
|
||||||
'description': 'How is the internet shaping our lives in the 21st Century?',
|
'description': 'How is the internet shaping our lives in the 21st Century?',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 17,
|
'playlist_mincount': 16,
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://curiositystream.com/series/2',
|
'url': 'https://curiositystream.com/series/2',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://curiositystream.com/collections/36',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|||||||
@@ -32,6 +32,18 @@ class DigitallySpeakingIE(InfoExtractor):
|
|||||||
# From http://www.gdcvault.com/play/1013700/Advanced-Material
|
# From http://www.gdcvault.com/play/1013700/Advanced-Material
|
||||||
'url': 'http://sevt.dispeak.com/ubm/gdc/eur10/xml/11256_1282118587281VNIT.xml',
|
'url': 'http://sevt.dispeak.com/ubm/gdc/eur10/xml/11256_1282118587281VNIT.xml',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# From https://gdcvault.com/play/1016624, empty speakerVideo
|
||||||
|
'url': 'https://sevt.dispeak.com/ubm/gdc/online12/xml/201210-822101_1349794556671DDDD.xml',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '201210-822101_1349794556671DDDD',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Pre-launch - Preparing to Take the Plunge',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# From http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru, empty slideVideo
|
||||||
|
'url': 'http://events.digitallyspeaking.com/gdc/project25/xml/p25-miyamoto1999_1282467389849HSVB.xml',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _parse_mp4(self, metadata):
|
def _parse_mp4(self, metadata):
|
||||||
@@ -84,26 +96,20 @@ class DigitallySpeakingIE(InfoExtractor):
|
|||||||
'vcodec': 'none',
|
'vcodec': 'none',
|
||||||
'format_id': audio.get('code'),
|
'format_id': audio.get('code'),
|
||||||
})
|
})
|
||||||
slide_video_path = xpath_text(metadata, './slideVideo', fatal=True)
|
for video_key, format_id, preference in (
|
||||||
formats.append({
|
('slide', 'slides', -2), ('speaker', 'speaker', -1)):
|
||||||
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
|
video_path = xpath_text(metadata, './%sVideo' % video_key)
|
||||||
'play_path': remove_end(slide_video_path, '.flv'),
|
if not video_path:
|
||||||
'ext': 'flv',
|
continue
|
||||||
'format_note': 'slide deck video',
|
formats.append({
|
||||||
'quality': -2,
|
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
|
||||||
'preference': -2,
|
'play_path': remove_end(video_path, '.flv'),
|
||||||
'format_id': 'slides',
|
'ext': 'flv',
|
||||||
})
|
'format_note': '%s video' % video_key,
|
||||||
speaker_video_path = xpath_text(metadata, './speakerVideo', fatal=True)
|
'quality': preference,
|
||||||
formats.append({
|
'preference': preference,
|
||||||
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
|
'format_id': format_id,
|
||||||
'play_path': remove_end(speaker_video_path, '.flv'),
|
})
|
||||||
'ext': 'flv',
|
|
||||||
'format_note': 'speaker video',
|
|
||||||
'quality': -1,
|
|
||||||
'preference': -1,
|
|
||||||
'format_id': 'speaker',
|
|
||||||
})
|
|
||||||
return formats
|
return formats
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import json
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@@ -10,16 +11,23 @@ from ..utils import (
|
|||||||
ExtractorError,
|
ExtractorError,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
strip_or_none,
|
||||||
unified_timestamp,
|
unified_timestamp,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class DPlayIE(InfoExtractor):
|
class DPlayIE(InfoExtractor):
|
||||||
|
_PATH_REGEX = r'/(?P<id>[^/]+/[^/?#]+)'
|
||||||
_VALID_URL = r'''(?x)https?://
|
_VALID_URL = r'''(?x)https?://
|
||||||
(?P<domain>
|
(?P<domain>
|
||||||
(?:www\.)?(?P<host>dplay\.(?P<country>dk|fi|jp|se|no))|
|
(?:www\.)?(?P<host>d
|
||||||
|
(?:
|
||||||
|
play\.(?P<country>dk|fi|jp|se|no)|
|
||||||
|
iscoveryplus\.(?P<plus_country>dk|es|fi|it|se|no)
|
||||||
|
)
|
||||||
|
)|
|
||||||
(?P<subdomain_country>es|it)\.dplay\.com
|
(?P<subdomain_country>es|it)\.dplay\.com
|
||||||
)/[^/]+/(?P<id>[^/]+/[^/?#]+)'''
|
)/[^/]+''' + _PATH_REGEX
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# non geo restricted, via secure api, unsigned download hls URL
|
# non geo restricted, via secure api, unsigned download hls URL
|
||||||
@@ -126,58 +134,99 @@ class DPlayIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'https://www.dplay.jp/video/gold-rush/24086',
|
'url': 'https://www.dplay.jp/video/gold-rush/24086',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.discoveryplus.se/videos/nugammalt-77-handelser-som-format-sverige/nugammalt-77-handelser-som-format-sverige-101',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.discoveryplus.dk/videoer/ted-bundy-mind-of-a-monster/ted-bundy-mind-of-a-monster',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.discoveryplus.no/videoer/i-kongens-klr/sesong-1-episode-7',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.discoveryplus.it/videos/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.discoveryplus.es/videos/la-fiebre-del-oro/temporada-8-episodio-1',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.discoveryplus.fi/videot/shifting-gears-with-aaron-kaufman/episode-16',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
def _process_errors(self, e, geo_countries):
|
||||||
|
info = self._parse_json(e.cause.read().decode('utf-8'), None)
|
||||||
|
error = info['errors'][0]
|
||||||
|
error_code = error.get('code')
|
||||||
|
if error_code == 'access.denied.geoblocked':
|
||||||
|
self.raise_geo_restricted(countries=geo_countries)
|
||||||
|
elif error_code in ('access.denied.missingpackage', 'invalid.token'):
|
||||||
|
raise ExtractorError(
|
||||||
|
'This video is only available for registered users. You may want to use --cookies.', expected=True)
|
||||||
|
raise ExtractorError(info['errors'][0]['detail'], expected=True)
|
||||||
|
|
||||||
|
def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
|
||||||
|
headers['Authorization'] = 'Bearer ' + self._download_json(
|
||||||
|
disco_base + 'token', display_id, 'Downloading token',
|
||||||
|
query={
|
||||||
|
'realm': realm,
|
||||||
|
})['data']['attributes']['token']
|
||||||
|
|
||||||
|
def _download_video_playback_info(self, disco_base, video_id, headers):
|
||||||
|
streaming = self._download_json(
|
||||||
|
disco_base + 'playback/videoPlaybackInfo/' + video_id,
|
||||||
|
video_id, headers=headers)['data']['attributes']['streaming']
|
||||||
|
streaming_list = []
|
||||||
|
for format_id, format_dict in streaming.items():
|
||||||
|
streaming_list.append({
|
||||||
|
'type': format_id,
|
||||||
|
'url': format_dict.get('url'),
|
||||||
|
})
|
||||||
|
return streaming_list
|
||||||
|
|
||||||
def _get_disco_api_info(self, url, display_id, disco_host, realm, country):
|
def _get_disco_api_info(self, url, display_id, disco_host, realm, country):
|
||||||
geo_countries = [country.upper()]
|
geo_countries = [country.upper()]
|
||||||
self._initialize_geo_bypass({
|
self._initialize_geo_bypass({
|
||||||
'countries': geo_countries,
|
'countries': geo_countries,
|
||||||
})
|
})
|
||||||
disco_base = 'https://%s/' % disco_host
|
disco_base = 'https://%s/' % disco_host
|
||||||
token = self._download_json(
|
|
||||||
disco_base + 'token', display_id, 'Downloading token',
|
|
||||||
query={
|
|
||||||
'realm': realm,
|
|
||||||
})['data']['attributes']['token']
|
|
||||||
headers = {
|
headers = {
|
||||||
'Referer': url,
|
'Referer': url,
|
||||||
'Authorization': 'Bearer ' + token,
|
|
||||||
}
|
}
|
||||||
video = self._download_json(
|
self._update_disco_api_headers(headers, disco_base, display_id, realm)
|
||||||
disco_base + 'content/videos/' + display_id, display_id,
|
try:
|
||||||
headers=headers, query={
|
video = self._download_json(
|
||||||
'fields[channel]': 'name',
|
disco_base + 'content/videos/' + display_id, display_id,
|
||||||
'fields[image]': 'height,src,width',
|
headers=headers, query={
|
||||||
'fields[show]': 'name',
|
'fields[channel]': 'name',
|
||||||
'fields[tag]': 'name',
|
'fields[image]': 'height,src,width',
|
||||||
'fields[video]': 'description,episodeNumber,name,publishStart,seasonNumber,videoDuration',
|
'fields[show]': 'name',
|
||||||
'include': 'images,primaryChannel,show,tags'
|
'fields[tag]': 'name',
|
||||||
})
|
'fields[video]': 'description,episodeNumber,name,publishStart,seasonNumber,videoDuration',
|
||||||
|
'include': 'images,primaryChannel,show,tags'
|
||||||
|
})
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
|
||||||
|
self._process_errors(e, geo_countries)
|
||||||
|
raise
|
||||||
video_id = video['data']['id']
|
video_id = video['data']['id']
|
||||||
info = video['data']['attributes']
|
info = video['data']['attributes']
|
||||||
title = info['name'].strip()
|
title = info['name'].strip()
|
||||||
formats = []
|
formats = []
|
||||||
try:
|
try:
|
||||||
streaming = self._download_json(
|
streaming = self._download_video_playback_info(
|
||||||
disco_base + 'playback/videoPlaybackInfo/' + video_id,
|
disco_base, video_id, headers)
|
||||||
display_id, headers=headers)['data']['attributes']['streaming']
|
|
||||||
except ExtractorError as e:
|
except ExtractorError as e:
|
||||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
|
||||||
info = self._parse_json(e.cause.read().decode('utf-8'), display_id)
|
self._process_errors(e, geo_countries)
|
||||||
error = info['errors'][0]
|
|
||||||
error_code = error.get('code')
|
|
||||||
if error_code == 'access.denied.geoblocked':
|
|
||||||
self.raise_geo_restricted(countries=geo_countries)
|
|
||||||
elif error_code == 'access.denied.missingpackage':
|
|
||||||
self.raise_login_required()
|
|
||||||
raise ExtractorError(info['errors'][0]['detail'], expected=True)
|
|
||||||
raise
|
raise
|
||||||
for format_id, format_dict in streaming.items():
|
for format_dict in streaming:
|
||||||
if not isinstance(format_dict, dict):
|
if not isinstance(format_dict, dict):
|
||||||
continue
|
continue
|
||||||
format_url = format_dict.get('url')
|
format_url = format_dict.get('url')
|
||||||
if not format_url:
|
if not format_url:
|
||||||
continue
|
continue
|
||||||
|
format_id = format_dict.get('type')
|
||||||
ext = determine_ext(format_url)
|
ext = determine_ext(format_url)
|
||||||
if format_id == 'dash' or ext == 'mpd':
|
if format_id == 'dash' or ext == 'mpd':
|
||||||
formats.extend(self._extract_mpd_formats(
|
formats.extend(self._extract_mpd_formats(
|
||||||
@@ -225,7 +274,7 @@ class DPlayIE(InfoExtractor):
|
|||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': info.get('description'),
|
'description': strip_or_none(info.get('description')),
|
||||||
'duration': float_or_none(info.get('videoDuration'), 1000),
|
'duration': float_or_none(info.get('videoDuration'), 1000),
|
||||||
'timestamp': unified_timestamp(info.get('publishStart')),
|
'timestamp': unified_timestamp(info.get('publishStart')),
|
||||||
'series': series,
|
'series': series,
|
||||||
@@ -241,7 +290,80 @@ class DPlayIE(InfoExtractor):
|
|||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
display_id = mobj.group('id')
|
display_id = mobj.group('id')
|
||||||
domain = mobj.group('domain').lstrip('www.')
|
domain = mobj.group('domain').lstrip('www.')
|
||||||
country = mobj.group('country') or mobj.group('subdomain_country')
|
country = mobj.group('country') or mobj.group('subdomain_country') or mobj.group('plus_country')
|
||||||
host = 'disco-api.' + domain if domain.startswith('dplay.') else 'eu2-prod.disco-api.com'
|
host = 'disco-api.' + domain if domain[0] == 'd' else 'eu2-prod.disco-api.com'
|
||||||
return self._get_disco_api_info(
|
return self._get_disco_api_info(
|
||||||
url, display_id, host, 'dplay' + country, country)
|
url, display_id, host, 'dplay' + country, country)
|
||||||
|
|
||||||
|
|
||||||
|
class DiscoveryPlusIE(DPlayIE):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.com/video' + DPlayIE._PATH_REGEX
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.discoveryplus.com/video/property-brothers-forever-home/food-and-family',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1140794',
|
||||||
|
'display_id': 'property-brothers-forever-home/food-and-family',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Food and Family',
|
||||||
|
'description': 'The brothers help a Richmond family expand their single-level home.',
|
||||||
|
'duration': 2583.113,
|
||||||
|
'timestamp': 1609304400,
|
||||||
|
'upload_date': '20201230',
|
||||||
|
'creator': 'HGTV',
|
||||||
|
'series': 'Property Brothers: Forever Home',
|
||||||
|
'season_number': 1,
|
||||||
|
'episode_number': 1,
|
||||||
|
},
|
||||||
|
'skip': 'Available for Premium users',
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
|
||||||
|
headers['x-disco-client'] = 'WEB:UNKNOWN:dplus_us:15.0.0'
|
||||||
|
|
||||||
|
def _download_video_playback_info(self, disco_base, video_id, headers):
|
||||||
|
return self._download_json(
|
||||||
|
disco_base + 'playback/v3/videoPlaybackInfo',
|
||||||
|
video_id, headers=headers, data=json.dumps({
|
||||||
|
'deviceInfo': {
|
||||||
|
'adBlocker': False,
|
||||||
|
},
|
||||||
|
'videoId': video_id,
|
||||||
|
'wisteriaProperties': {
|
||||||
|
'platform': 'desktop',
|
||||||
|
'product': 'dplus_us',
|
||||||
|
},
|
||||||
|
}).encode('utf-8'))['data']['attributes']['streaming']
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
return self._get_disco_api_info(
|
||||||
|
url, display_id, 'us1-prod-direct.discoveryplus.com', 'go', 'us')
|
||||||
|
|
||||||
|
|
||||||
|
class HGTVDeIE(DPlayIE):
|
||||||
|
_VALID_URL = r'https?://de\.hgtv\.com/sendungen' + DPlayIE._PATH_REGEX
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://de.hgtv.com/sendungen/tiny-house-klein-aber-oho/wer-braucht-schon-eine-toilette/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '151205',
|
||||||
|
'display_id': 'tiny-house-klein-aber-oho/wer-braucht-schon-eine-toilette',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Wer braucht schon eine Toilette',
|
||||||
|
'description': 'md5:05b40a27e7aed2c9172de34d459134e2',
|
||||||
|
'duration': 1177.024,
|
||||||
|
'timestamp': 1595705400,
|
||||||
|
'upload_date': '20200725',
|
||||||
|
'creator': 'HGTV',
|
||||||
|
'series': 'Tiny House - klein, aber oho',
|
||||||
|
'season_number': 3,
|
||||||
|
'episode_number': 3,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'format': 'bestvideo',
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
return self._get_disco_api_info(
|
||||||
|
url, display_id, 'eu1-prod.disco-api.com', 'hgtv', 'de')
|
||||||
|
|||||||
@@ -1,193 +1,43 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
from .zdf import ZDFIE
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
int_or_none,
|
|
||||||
unified_strdate,
|
|
||||||
xpath_text,
|
|
||||||
determine_ext,
|
|
||||||
float_or_none,
|
|
||||||
ExtractorError,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class DreiSatIE(InfoExtractor):
|
class DreiSatIE(ZDFIE):
|
||||||
IE_NAME = '3sat'
|
IE_NAME = '3sat'
|
||||||
_GEO_COUNTRIES = ['DE']
|
_VALID_URL = r'https?://(?:www\.)?3sat\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)\.html'
|
||||||
_VALID_URL = r'https?://(?:www\.)?3sat\.de/mediathek/(?:(?:index|mediathek)\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)'
|
_TESTS = [{
|
||||||
_TESTS = [
|
# Same as https://www.zdf.de/dokumentation/ab-18/10-wochen-sommer-102.html
|
||||||
{
|
'url': 'https://www.3sat.de/film/ab-18/10-wochen-sommer-108.html',
|
||||||
'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
|
'md5': '0aff3e7bc72c8813f5e0fae333316a1d',
|
||||||
'md5': 'be37228896d30a88f315b638900a026e',
|
'info_dict': {
|
||||||
'info_dict': {
|
'id': '141007_ab18_10wochensommer_film',
|
||||||
'id': '45918',
|
'ext': 'mp4',
|
||||||
'ext': 'mp4',
|
'title': 'Ab 18! - 10 Wochen Sommer',
|
||||||
'title': 'Waidmannsheil',
|
'description': 'md5:8253f41dc99ce2c3ff892dac2d65fe26',
|
||||||
'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
|
'duration': 2660,
|
||||||
'uploader': 'SCHWEIZWEIT',
|
'timestamp': 1608604200,
|
||||||
'uploader_id': '100000210',
|
'upload_date': '20201222',
|
||||||
'upload_date': '20140913'
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True, # m3u8 downloads
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
}, {
|
||||||
'url': 'http://www.3sat.de/mediathek/mediathek.php?mode=play&obj=51066',
|
'url': 'https://www.3sat.de/gesellschaft/schweizweit/waidmannsheil-100.html',
|
||||||
'only_matching': True,
|
'info_dict': {
|
||||||
|
'id': '140913_sendung_schweizweit',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Waidmannsheil',
|
||||||
|
'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
|
||||||
|
'timestamp': 1410623100,
|
||||||
|
'upload_date': '20140913'
|
||||||
},
|
},
|
||||||
]
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
|
|
||||||
param_groups = {}
|
|
||||||
for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)):
|
|
||||||
group_id = param_group.get(self._xpath_ns(
|
|
||||||
'id', 'http://www.w3.org/XML/1998/namespace'))
|
|
||||||
params = {}
|
|
||||||
for param in param_group:
|
|
||||||
params[param.get('name')] = param.get('value')
|
|
||||||
param_groups[group_id] = params
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
for video in smil.findall(self._xpath_ns('.//video', namespace)):
|
|
||||||
src = video.get('src')
|
|
||||||
if not src:
|
|
||||||
continue
|
|
||||||
bitrate = int_or_none(self._search_regex(r'_(\d+)k', src, 'bitrate', None)) or float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
|
|
||||||
group_id = video.get('paramGroup')
|
|
||||||
param_group = param_groups[group_id]
|
|
||||||
for proto in param_group['protocols'].split(','):
|
|
||||||
formats.append({
|
|
||||||
'url': '%s://%s' % (proto, param_group['host']),
|
|
||||||
'app': param_group['app'],
|
|
||||||
'play_path': src,
|
|
||||||
'ext': 'flv',
|
|
||||||
'format_id': '%s-%d' % (proto, bitrate),
|
|
||||||
'tbr': bitrate,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats)
|
|
||||||
return formats
|
|
||||||
|
|
||||||
def extract_from_xml_url(self, video_id, xml_url):
|
|
||||||
doc = self._download_xml(
|
|
||||||
xml_url, video_id,
|
|
||||||
note='Downloading video info',
|
|
||||||
errnote='Failed to download video info')
|
|
||||||
|
|
||||||
status_code = xpath_text(doc, './status/statuscode')
|
|
||||||
if status_code and status_code != 'ok':
|
|
||||||
if status_code == 'notVisibleAnymore':
|
|
||||||
message = 'Video %s is not available' % video_id
|
|
||||||
else:
|
|
||||||
message = '%s returned error: %s' % (self.IE_NAME, status_code)
|
|
||||||
raise ExtractorError(message, expected=True)
|
|
||||||
|
|
||||||
title = xpath_text(doc, './/information/title', 'title', True)
|
|
||||||
|
|
||||||
urls = []
|
|
||||||
formats = []
|
|
||||||
for fnode in doc.findall('.//formitaeten/formitaet'):
|
|
||||||
video_url = xpath_text(fnode, 'url')
|
|
||||||
if not video_url or video_url in urls:
|
|
||||||
continue
|
|
||||||
urls.append(video_url)
|
|
||||||
|
|
||||||
is_available = 'http://www.metafilegenerator' not in video_url
|
|
||||||
geoloced = 'static_geoloced_online' in video_url
|
|
||||||
if not is_available or geoloced:
|
|
||||||
continue
|
|
||||||
|
|
||||||
format_id = fnode.attrib['basetype']
|
|
||||||
format_m = re.match(r'''(?x)
|
|
||||||
(?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
|
|
||||||
(?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
|
|
||||||
''', format_id)
|
|
||||||
|
|
||||||
ext = determine_ext(video_url, None) or format_m.group('container')
|
|
||||||
|
|
||||||
if ext == 'meta':
|
|
||||||
continue
|
|
||||||
elif ext == 'smil':
|
|
||||||
formats.extend(self._extract_smil_formats(
|
|
||||||
video_url, video_id, fatal=False))
|
|
||||||
elif ext == 'm3u8':
|
|
||||||
# the certificates are misconfigured (see
|
|
||||||
# https://github.com/ytdl-org/youtube-dl/issues/8665)
|
|
||||||
if video_url.startswith('https://'):
|
|
||||||
continue
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
video_url, video_id, 'mp4', 'm3u8_native',
|
|
||||||
m3u8_id=format_id, fatal=False))
|
|
||||||
elif ext == 'f4m':
|
|
||||||
formats.extend(self._extract_f4m_formats(
|
|
||||||
video_url, video_id, f4m_id=format_id, fatal=False))
|
|
||||||
else:
|
|
||||||
quality = xpath_text(fnode, './quality')
|
|
||||||
if quality:
|
|
||||||
format_id += '-' + quality
|
|
||||||
|
|
||||||
abr = int_or_none(xpath_text(fnode, './audioBitrate'), 1000)
|
|
||||||
vbr = int_or_none(xpath_text(fnode, './videoBitrate'), 1000)
|
|
||||||
|
|
||||||
tbr = int_or_none(self._search_regex(
|
|
||||||
r'_(\d+)k', video_url, 'bitrate', None))
|
|
||||||
if tbr and vbr and not abr:
|
|
||||||
abr = tbr - vbr
|
|
||||||
|
|
||||||
formats.append({
|
|
||||||
'format_id': format_id,
|
|
||||||
'url': video_url,
|
|
||||||
'ext': ext,
|
|
||||||
'acodec': format_m.group('acodec'),
|
|
||||||
'vcodec': format_m.group('vcodec'),
|
|
||||||
'abr': abr,
|
|
||||||
'vbr': vbr,
|
|
||||||
'tbr': tbr,
|
|
||||||
'width': int_or_none(xpath_text(fnode, './width')),
|
|
||||||
'height': int_or_none(xpath_text(fnode, './height')),
|
|
||||||
'filesize': int_or_none(xpath_text(fnode, './filesize')),
|
|
||||||
'protocol': format_m.group('proto').lower(),
|
|
||||||
})
|
|
||||||
|
|
||||||
geolocation = xpath_text(doc, './/details/geolocation')
|
|
||||||
if not formats and geolocation and geolocation != 'none':
|
|
||||||
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
|
|
||||||
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
thumbnails = []
|
|
||||||
for node in doc.findall('.//teaserimages/teaserimage'):
|
|
||||||
thumbnail_url = node.text
|
|
||||||
if not thumbnail_url:
|
|
||||||
continue
|
|
||||||
thumbnail = {
|
|
||||||
'url': thumbnail_url,
|
|
||||||
}
|
|
||||||
thumbnail_key = node.get('key')
|
|
||||||
if thumbnail_key:
|
|
||||||
m = re.match('^([0-9]+)x([0-9]+)$', thumbnail_key)
|
|
||||||
if m:
|
|
||||||
thumbnail['width'] = int(m.group(1))
|
|
||||||
thumbnail['height'] = int(m.group(2))
|
|
||||||
thumbnails.append(thumbnail)
|
|
||||||
|
|
||||||
upload_date = unified_strdate(xpath_text(doc, './/details/airtime'))
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'description': xpath_text(doc, './/information/detail'),
|
|
||||||
'duration': int_or_none(xpath_text(doc, './/details/lengthSec')),
|
|
||||||
'thumbnails': thumbnails,
|
|
||||||
'uploader': xpath_text(doc, './/details/originChannelTitle'),
|
|
||||||
'uploader_id': xpath_text(doc, './/details/originChannelId'),
|
|
||||||
'upload_date': upload_date,
|
|
||||||
'formats': formats,
|
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
def _real_extract(self, url):
|
# Same as https://www.zdf.de/filme/filme-sonstige/der-hauptmann-112.html
|
||||||
video_id = self._match_id(url)
|
'url': 'https://www.3sat.de/film/spielfilm/der-hauptmann-100.html',
|
||||||
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?id=%s' % video_id
|
'only_matching': True,
|
||||||
return self.extract_from_xml_url(video_id, details_url)
|
}, {
|
||||||
|
# Same as https://www.zdf.de/wissen/nano/nano-21-mai-2019-102.html, equal media ids
|
||||||
|
'url': 'https://www.3sat.de/wissen/nano/nano-21-mai-2019-102.html',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|||||||
@@ -12,26 +12,35 @@ from ..utils import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class EggheadCourseIE(InfoExtractor):
|
class EggheadBaseIE(InfoExtractor):
|
||||||
|
def _call_api(self, path, video_id, resource, fatal=True):
|
||||||
|
return self._download_json(
|
||||||
|
'https://app.egghead.io/api/v1/' + path,
|
||||||
|
video_id, 'Downloading %s JSON' % resource, fatal=fatal)
|
||||||
|
|
||||||
|
|
||||||
|
class EggheadCourseIE(EggheadBaseIE):
|
||||||
IE_DESC = 'egghead.io course'
|
IE_DESC = 'egghead.io course'
|
||||||
IE_NAME = 'egghead:course'
|
IE_NAME = 'egghead:course'
|
||||||
_VALID_URL = r'https://egghead\.io/courses/(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https://(?:app\.)?egghead\.io/(?:course|playlist)s/(?P<id>[^/?#&]+)'
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
|
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
|
||||||
'playlist_count': 29,
|
'playlist_count': 29,
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '72',
|
'id': '432655',
|
||||||
'title': 'Professor Frisby Introduces Composable Functional JavaScript',
|
'title': 'Professor Frisby Introduces Composable Functional JavaScript',
|
||||||
'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
|
'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
|
||||||
},
|
},
|
||||||
}
|
}, {
|
||||||
|
'url': 'https://app.egghead.io/playlists/professor-frisby-introduces-composable-functional-javascript',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
playlist_id = self._match_id(url)
|
playlist_id = self._match_id(url)
|
||||||
|
series_path = 'series/' + playlist_id
|
||||||
lessons = self._download_json(
|
lessons = self._call_api(
|
||||||
'https://egghead.io/api/v1/series/%s/lessons' % playlist_id,
|
series_path + '/lessons', playlist_id, 'course lessons')
|
||||||
playlist_id, 'Downloading course lessons JSON')
|
|
||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
for lesson in lessons:
|
for lesson in lessons:
|
||||||
@@ -44,9 +53,8 @@ class EggheadCourseIE(InfoExtractor):
|
|||||||
entries.append(self.url_result(
|
entries.append(self.url_result(
|
||||||
lesson_url, ie=EggheadLessonIE.ie_key(), video_id=lesson_id))
|
lesson_url, ie=EggheadLessonIE.ie_key(), video_id=lesson_id))
|
||||||
|
|
||||||
course = self._download_json(
|
course = self._call_api(
|
||||||
'https://egghead.io/api/v1/series/%s' % playlist_id,
|
series_path, playlist_id, 'course', False) or {}
|
||||||
playlist_id, 'Downloading course JSON', fatal=False) or {}
|
|
||||||
|
|
||||||
playlist_id = course.get('id')
|
playlist_id = course.get('id')
|
||||||
if playlist_id:
|
if playlist_id:
|
||||||
@@ -57,10 +65,10 @@ class EggheadCourseIE(InfoExtractor):
|
|||||||
course.get('description'))
|
course.get('description'))
|
||||||
|
|
||||||
|
|
||||||
class EggheadLessonIE(InfoExtractor):
|
class EggheadLessonIE(EggheadBaseIE):
|
||||||
IE_DESC = 'egghead.io lesson'
|
IE_DESC = 'egghead.io lesson'
|
||||||
IE_NAME = 'egghead:lesson'
|
IE_NAME = 'egghead:lesson'
|
||||||
_VALID_URL = r'https://egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https://(?:app\.)?egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
|
'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -74,7 +82,7 @@ class EggheadLessonIE(InfoExtractor):
|
|||||||
'upload_date': '20161209',
|
'upload_date': '20161209',
|
||||||
'duration': 304,
|
'duration': 304,
|
||||||
'view_count': 0,
|
'view_count': 0,
|
||||||
'tags': ['javascript', 'free'],
|
'tags': 'count:2',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
@@ -83,13 +91,16 @@ class EggheadLessonIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'https://egghead.io/api/v1/lessons/react-add-redux-to-a-react-application',
|
'url': 'https://egghead.io/api/v1/lessons/react-add-redux-to-a-react-application',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://app.egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
display_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
|
|
||||||
lesson = self._download_json(
|
lesson = self._call_api(
|
||||||
'https://egghead.io/api/v1/lessons/%s' % display_id, display_id)
|
'lessons/' + display_id, display_id, 'lesson')
|
||||||
|
|
||||||
lesson_id = compat_str(lesson['id'])
|
lesson_id = compat_str(lesson['id'])
|
||||||
title = lesson['title']
|
title = lesson['title']
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class EpornerIE(InfoExtractor):
|
class EpornerIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:hd-porn|embed)/(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?'
|
_VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:(?:hd-porn|embed)/|video-)(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
|
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
|
||||||
'md5': '39d486f046212d8e1b911c52ab4691f8',
|
'md5': '39d486f046212d8e1b911c52ab4691f8',
|
||||||
@@ -43,7 +43,10 @@ class EpornerIE(InfoExtractor):
|
|||||||
'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',
|
'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',
|
'url': 'http://www.eporner.com/embed/3YRUtzMcWn0',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.eporner.com/video-FJsA19J3Y3H/one-of-the-greats/',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
@@ -57,7 +60,7 @@ class EpornerIE(InfoExtractor):
|
|||||||
video_id = self._match_id(urlh.geturl())
|
video_id = self._match_id(urlh.geturl())
|
||||||
|
|
||||||
hash = self._search_regex(
|
hash = self._search_regex(
|
||||||
r'hash\s*:\s*["\']([\da-f]{32})', webpage, 'hash')
|
r'hash\s*[:=]\s*["\']([\da-f]{32})', webpage, 'hash')
|
||||||
|
|
||||||
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
|
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
|
||||||
r'<title>(.+?) - EPORNER', webpage, 'title')
|
r'<title>(.+?) - EPORNER', webpage, 'title')
|
||||||
@@ -115,8 +118,8 @@ class EpornerIE(InfoExtractor):
|
|||||||
duration = parse_duration(self._html_search_meta(
|
duration = parse_duration(self._html_search_meta(
|
||||||
'duration', webpage, default=None))
|
'duration', webpage, default=None))
|
||||||
view_count = str_to_int(self._search_regex(
|
view_count = str_to_int(self._search_regex(
|
||||||
r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
|
r'id=["\']cinemaviews1["\'][^>]*>\s*([0-9,]+)',
|
||||||
webpage, 'view count', fatal=False))
|
webpage, 'view count', default=None))
|
||||||
|
|
||||||
return merge_dicts(json_ld, {
|
return merge_dicts(json_ld, {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from .common import InfoExtractor
|
|||||||
from ..compat import compat_urllib_parse_urlencode
|
from ..compat import compat_urllib_parse_urlencode
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
unescapeHTML
|
merge_dicts,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -24,7 +24,8 @@ class EroProfileIE(InfoExtractor):
|
|||||||
'title': 'sexy babe softcore',
|
'title': 'sexy babe softcore',
|
||||||
'thumbnail': r're:https?://.*\.jpg',
|
'thumbnail': r're:https?://.*\.jpg',
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
},
|
||||||
|
'skip': 'Video not found',
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
|
'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
|
||||||
'md5': '1baa9602ede46ce904c431f5418d8916',
|
'md5': '1baa9602ede46ce904c431f5418d8916',
|
||||||
@@ -77,19 +78,15 @@ class EroProfileIE(InfoExtractor):
|
|||||||
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
|
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
|
||||||
webpage, 'video id', default=None)
|
webpage, 'video id', default=None)
|
||||||
|
|
||||||
video_url = unescapeHTML(self._search_regex(
|
|
||||||
r'<source src="([^"]+)', webpage, 'video url'))
|
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
r'Title:</th><td>([^<]+)</td>', webpage, 'title')
|
(r'Title:</th><td>([^<]+)</td>', r'<h1[^>]*>(.+?)</h1>'),
|
||||||
thumbnail = self._search_regex(
|
webpage, 'title')
|
||||||
r'onclick="showVideoPlayer\(\)"><img src="([^"]+)',
|
|
||||||
webpage, 'thumbnail', fatal=False)
|
|
||||||
|
|
||||||
return {
|
info = self._parse_html5_media_entries(url, webpage, video_id)[0]
|
||||||
|
|
||||||
|
return merge_dicts(info, {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
'url': video_url,
|
|
||||||
'title': title,
|
'title': title,
|
||||||
'thumbnail': thumbnail,
|
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
})
|
||||||
|
|||||||
@@ -1,77 +0,0 @@
|
|||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
ExtractorError,
|
|
||||||
sanitized_Request,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class EveryonesMixtapeIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
|
|
||||||
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '5bfseWNmlds',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
|
|
||||||
'uploader': 'FKR.TV',
|
|
||||||
'uploader_id': 'frenchkissrecords',
|
|
||||||
'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
|
|
||||||
'upload_date': '20081015'
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True, # This is simply YouTube
|
|
||||||
}
|
|
||||||
}, {
|
|
||||||
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'm7m0jJAbMQi',
|
|
||||||
'title': 'Driving',
|
|
||||||
},
|
|
||||||
'playlist_count': 24
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
mobj = re.match(self._VALID_URL, url)
|
|
||||||
playlist_id = mobj.group('id')
|
|
||||||
|
|
||||||
pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
|
|
||||||
pllist_req = sanitized_Request(pllist_url)
|
|
||||||
pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
|
|
||||||
|
|
||||||
playlist_list = self._download_json(
|
|
||||||
pllist_req, playlist_id, note='Downloading playlist metadata')
|
|
||||||
try:
|
|
||||||
playlist_no = next(playlist['id']
|
|
||||||
for playlist in playlist_list
|
|
||||||
if playlist['code'] == playlist_id)
|
|
||||||
except StopIteration:
|
|
||||||
raise ExtractorError('Playlist id not found')
|
|
||||||
|
|
||||||
pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
|
|
||||||
pl_req = sanitized_Request(pl_url)
|
|
||||||
pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
|
|
||||||
playlist = self._download_json(
|
|
||||||
pl_req, playlist_id, note='Downloading playlist info')
|
|
||||||
|
|
||||||
entries = [{
|
|
||||||
'_type': 'url',
|
|
||||||
'url': t['url'],
|
|
||||||
'title': t['title'],
|
|
||||||
} for t in playlist['tracks']]
|
|
||||||
|
|
||||||
if mobj.group('songnr'):
|
|
||||||
songnr = int(mobj.group('songnr')) - 1
|
|
||||||
return entries[songnr]
|
|
||||||
|
|
||||||
playlist_title = playlist['mixData']['name']
|
|
||||||
return {
|
|
||||||
'_type': 'playlist',
|
|
||||||
'id': playlist_id,
|
|
||||||
'title': playlist_title,
|
|
||||||
'entries': entries,
|
|
||||||
}
|
|
||||||
@@ -33,6 +33,8 @@ from .aenetworks import (
|
|||||||
AENetworksCollectionIE,
|
AENetworksCollectionIE,
|
||||||
AENetworksShowIE,
|
AENetworksShowIE,
|
||||||
HistoryTopicIE,
|
HistoryTopicIE,
|
||||||
|
HistoryPlayerIE,
|
||||||
|
BiographyIE,
|
||||||
)
|
)
|
||||||
from .afreecatv import AfreecaTVIE
|
from .afreecatv import AfreecaTVIE
|
||||||
from .airmozilla import AirMozillaIE
|
from .airmozilla import AirMozillaIE
|
||||||
@@ -40,12 +42,19 @@ from .aljazeera import AlJazeeraIE
|
|||||||
from .alphaporno import AlphaPornoIE
|
from .alphaporno import AlphaPornoIE
|
||||||
from .amara import AmaraIE
|
from .amara import AmaraIE
|
||||||
from .amcnetworks import AMCNetworksIE
|
from .amcnetworks import AMCNetworksIE
|
||||||
from .americastestkitchen import AmericasTestKitchenIE
|
from .americastestkitchen import (
|
||||||
|
AmericasTestKitchenIE,
|
||||||
|
AmericasTestKitchenSeasonIE,
|
||||||
|
)
|
||||||
from .animeondemand import AnimeOnDemandIE
|
from .animeondemand import AnimeOnDemandIE
|
||||||
from .anvato import AnvatoIE
|
from .anvato import AnvatoIE
|
||||||
from .aol import AolIE
|
from .aol import AolIE
|
||||||
from .allocine import AllocineIE
|
from .allocine import AllocineIE
|
||||||
from .aliexpress import AliExpressLiveIE
|
from .aliexpress import AliExpressLiveIE
|
||||||
|
from .alsace20tv import (
|
||||||
|
Alsace20TVIE,
|
||||||
|
Alsace20TVEmbedIE,
|
||||||
|
)
|
||||||
from .apa import APAIE
|
from .apa import APAIE
|
||||||
from .aparat import AparatIE
|
from .aparat import AparatIE
|
||||||
from .appleconnect import AppleConnectIE
|
from .appleconnect import AppleConnectIE
|
||||||
@@ -53,7 +62,9 @@ from .appletrailers import (
|
|||||||
AppleTrailersIE,
|
AppleTrailersIE,
|
||||||
AppleTrailersSectionIE,
|
AppleTrailersSectionIE,
|
||||||
)
|
)
|
||||||
|
from .applepodcasts import ApplePodcastsIE
|
||||||
from .archiveorg import ArchiveOrgIE
|
from .archiveorg import ArchiveOrgIE
|
||||||
|
from .arcpublishing import ArcPublishingIE
|
||||||
from .arkena import ArkenaIE
|
from .arkena import ArkenaIE
|
||||||
from .ard import (
|
from .ard import (
|
||||||
ARDBetaMediathekIE,
|
ARDBetaMediathekIE,
|
||||||
@@ -64,7 +75,9 @@ from .arte import (
|
|||||||
ArteTVIE,
|
ArteTVIE,
|
||||||
ArteTVEmbedIE,
|
ArteTVEmbedIE,
|
||||||
ArteTVPlaylistIE,
|
ArteTVPlaylistIE,
|
||||||
|
ArteTVCategoryIE,
|
||||||
)
|
)
|
||||||
|
from .arnes import ArnesIE
|
||||||
from .asiancrush import (
|
from .asiancrush import (
|
||||||
AsianCrushIE,
|
AsianCrushIE,
|
||||||
AsianCrushPlaylistIE,
|
AsianCrushPlaylistIE,
|
||||||
@@ -83,11 +96,13 @@ from .awaan import (
|
|||||||
)
|
)
|
||||||
from .azmedien import AZMedienIE
|
from .azmedien import AZMedienIE
|
||||||
from .baidu import BaiduVideoIE
|
from .baidu import BaiduVideoIE
|
||||||
|
from .bandaichannel import BandaiChannelIE
|
||||||
from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE
|
from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE
|
||||||
from .bbc import (
|
from .bbc import (
|
||||||
BBCCoUkIE,
|
BBCCoUkIE,
|
||||||
BBCCoUkArticleIE,
|
BBCCoUkArticleIE,
|
||||||
BBCCoUkIPlayerPlaylistIE,
|
BBCCoUkIPlayerEpisodesIE,
|
||||||
|
BBCCoUkIPlayerGroupIE,
|
||||||
     BBCCoUkPlaylistIE,
     BBCIE,
 )
@@ -97,7 +112,14 @@ from .bellmedia import BellMediaIE
 from .beatport import BeatportIE
 from .bet import BetIE
 from .bfi import BFIPlayerIE
+from .bfmtv import (
+    BFMTVIE,
+    BFMTVLiveIE,
+    BFMTVArticleIE,
+)
+from .bibeltv import BibelTVIE
 from .bigflix import BigflixIE
+from .bigo import BigoIE
 from .bild import BildIE
 from .bilibili import (
     BiliBiliIE,
@@ -116,9 +138,9 @@ from .bleacherreport import (
     BleacherReportIE,
     BleacherReportCMSIE,
 )
-from .blinkx import BlinkxIE
 from .bloomberg import BloombergIE
 from .bokecc import BokeCCIE
+from .bongacams import BongaCamsIE
 from .bostonglobe import BostonGlobeIE
 from .box import BoxIE
 from .bpb import BpbIE
@@ -149,6 +171,7 @@ from .canvas import (
     CanvasIE,
     CanvasEenIE,
     VrtNUIE,
+    DagelijkseKostIE,
 )
 from .carambatv import (
     CarambaTVIE,
@@ -163,14 +186,21 @@ from .cbc import (
     CBCOlympicsIE,
 )
 from .cbs import CBSIE
-from .cbslocal import CBSLocalIE
+from .cbslocal import (
+    CBSLocalIE,
+    CBSLocalArticleIE,
+)
 from .cbsinteractive import CBSInteractiveIE
 from .cbsnews import (
     CBSNewsEmbedIE,
     CBSNewsIE,
     CBSNewsLiveVideoIE,
 )
-from .cbssports import CBSSportsIE
+from .cbssports import (
+    CBSSportsEmbedIE,
+    CBSSportsIE,
+    TwentyFourSevenSportsIE,
+)
 from .ccc import (
     CCCIE,
     CCCPlaylistIE,
@@ -218,11 +248,8 @@ from .cnn import (
 )
 from .coub import CoubIE
 from .comedycentral import (
-    ComedyCentralFullEpisodesIE,
     ComedyCentralIE,
-    ComedyCentralShortnameIE,
     ComedyCentralTVIE,
-    ToshIE,
 )
 from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
 from .commonprotocols import (
@@ -232,6 +259,10 @@ from .commonprotocols import (
 from .condenast import CondeNastIE
 from .contv import CONtvIE
 from .corus import CorusIE
+from .cpac import (
+    CPACIE,
+    CPACPlaylistIE,
+)
 from .cracked import CrackedIE
 from .crackle import CrackleIE
 from .crooksandliars import CrooksAndLiarsIE
@@ -241,6 +272,7 @@ from .crunchyroll import (
 )
 from .cspan import CSpanIE
 from .ctsnews import CtsNewsIE
+from .ctv import CTVIE
 from .ctvnews import CTVNewsIE
 from .cultureunplugged import CultureUnpluggedIE
 from .curiositystream import (
@@ -272,7 +304,11 @@ from .douyutv import (
     DouyuShowIE,
     DouyuTVIE,
 )
-from .dplay import DPlayIE
+from .dplay import (
+    DPlayIE,
+    DiscoveryPlusIE,
+    HGTVDeIE,
+)
 from .dreisat import DreiSatIE
 from .drbonanza import DRBonanzaIE
 from .drtuber import DrTuberIE
@@ -327,7 +363,6 @@ from .espn import (
 )
 from .esri import EsriVideoIE
 from .europa import EuropaIE
-from .everyonesmixtape import EveryonesMixtapeIE
 from .expotv import ExpoTVIE
 from .expressen import ExpressenIE
 from .extremetube import ExtremeTubeIE
@@ -395,7 +430,6 @@ from .fujitv import FujiTVFODPlus7IE
 from .funimation import FunimationIE
 from .funk import FunkIE
 from .fusion import FusionIE
-from .fxnetworks import FXNetworksIE
 from .gaia import GaiaIE
 from .gameinformer import GameInformerIE
 from .gamespot import GameSpotIE
@@ -403,6 +437,7 @@ from .gamestar import GameStarIE
 from .gaskrank import GaskrankIE
 from .gazeta import GazetaIE
 from .gdcvault import GDCVaultIE
+from .gedidigital import GediDigitalIE
 from .generic import GenericIE
 from .gfycat import GfycatIE
 from .giantbomb import GiantBombIE
@@ -416,7 +451,10 @@ from .go import GoIE
 from .godtube import GodTubeIE
 from .golem import GolemIE
 from .googledrive import GoogleDriveIE
-from .googleplus import GooglePlusIE
+from .googlepodcasts import (
+    GooglePodcastsIE,
+    GooglePodcastsFeedIE,
+)
 from .googlesearch import GoogleSearchIE
 from .goshgay import GoshgayIE
 from .gputechconf import GPUTechConfIE
@@ -441,6 +479,7 @@ from .hotstar import (
 )
 from .howcast import HowcastIE
 from .howstuffworks import HowStuffWorksIE
+from .hrfernsehen import HRFernsehenIE
 from .hrti import (
     HRTiIE,
     HRTiPlaylistIE,
@@ -454,8 +493,12 @@ from .hungama import (
 from .hypem import HypemIE
 from .ign import (
     IGNIE,
-    OneUPIE,
-    PCMagIE,
+    IGNVideoIE,
+    IGNArticleIE,
+)
+from .iheart import (
+    IHeartRadioIE,
+    IHeartRadioPodcastIE,
 )
 from .imdb import (
     ImdbIE,
@@ -501,18 +544,21 @@ from .joj import JojIE
 from .jwplatform import JWPlatformIE
 from .kakao import KakaoIE
 from .kaltura import KalturaIE
-from .kanalplay import KanalPlayIE
 from .kankan import KankanIE
 from .karaoketv import KaraoketvIE
 from .karrierevideos import KarriereVideosIE
 from .keezmovies import KeezMoviesIE
 from .ketnet import KetnetIE
-from .khanacademy import KhanAcademyIE
+from .khanacademy import (
+    KhanAcademyIE,
+    KhanAcademyUnitIE,
+)
 from .kickstarter import KickStarterIE
 from .kinja import KinjaEmbedIE
 from .kinopoisk import KinoPoiskIE
 from .konserthusetplay import KonserthusetPlayIE
 from .krasview import KrasViewIE
+from .kth import KTHIE
 from .ku6 import Ku6IE
 from .kusi import KUSIIE
 from .kuwo import (
@@ -564,7 +610,11 @@ from .limelight import (
     LimelightChannelIE,
     LimelightChannelListIE,
 )
-from .line import LineTVIE
+from .line import (
+    LineTVIE,
+    LineLiveIE,
+    LineLiveChannelIE,
+)
 from .linkedin import (
     LinkedInLearningIE,
     LinkedInLearningCourseIE,
@@ -572,10 +622,6 @@ from .linkedin import (
 from .linuxacademy import LinuxAcademyIE
 from .litv import LiTVIE
 from .livejournal import LiveJournalIE
-from .liveleak import (
-    LiveLeakIE,
-    LiveLeakEmbedIE,
-)
 from .livestream import (
     LivestreamIE,
     LivestreamOriginalIE,
@@ -601,6 +647,7 @@ from .mangomolo import (
     MangomoloLiveIE,
 )
 from .manyvids import ManyVidsIE
+from .maoritv import MaoriTVIE
 from .markiza import (
     MarkizaIE,
     MarkizaPageIE,
@@ -629,6 +676,11 @@ from .microsoftvirtualacademy import (
     MicrosoftVirtualAcademyIE,
     MicrosoftVirtualAcademyCourseIE,
 )
+from .minds import (
+    MindsIE,
+    MindsChannelIE,
+    MindsGroupIE,
+)
 from .ministrygrid import MinistryGridIE
 from .minoto import MinotoIE
 from .miomio import MioMioIE
@@ -639,7 +691,10 @@ from .mixcloud import (
     MixcloudUserIE,
     MixcloudPlaylistIE,
 )
-from .mlb import MLBIE
+from .mlb import (
+    MLBIE,
+    MLBVideoIE,
+)
 from .mnet import MnetIE
 from .moevideo import MoeVideoIE
 from .mofosex import (
@@ -679,9 +734,15 @@ from .nationalgeographic import (
     NationalGeographicTVIE,
 )
 from .naver import NaverIE
-from .nba import NBAIE
+from .nba import (
+    NBAWatchEmbedIE,
+    NBAWatchIE,
+    NBAWatchCollectionIE,
+    NBAEmbedIE,
+    NBAIE,
+    NBAChannelIE,
+)
 from .nbc import (
-    CSNNEIE,
     NBCIE,
     NBCNewsIE,
     NBCOlympicsIE,
@@ -724,8 +785,14 @@ from .nexx import (
     NexxIE,
     NexxEmbedIE,
 )
-from .nfl import NFLIE
-from .nhk import NhkVodIE
+from .nfl import (
+    NFLIE,
+    NFLArticleIE,
+)
+from .nhk import (
+    NhkVodIE,
+    NhkVodProgramIE,
+)
 from .nhl import NHLIE
 from .nick import (
     NickIE,
@@ -734,14 +801,20 @@ from .nick import (
     NickNightIE,
     NickRuIE,
 )
-from .niconico import NiconicoIE, NiconicoPlaylistIE
+from .niconico import (
+    NiconicoIE,
+    NiconicoPlaylistIE,
+    NiconicoUserIE,
+    NicovideoSearchIE,
+    NicovideoSearchDateIE,
+    NicovideoSearchURLIE,
+)
 from .ninecninemedia import NineCNineMediaIE
 from .ninegag import NineGagIE
 from .ninenow import NineNowIE
 from .nintendo import NintendoIE
 from .njpwworld import NJPWWorldIE
 from .nobelprize import NobelPrizeIE
-from .noco import NocoIE
 from .nonktube import NonkTubeIE
 from .noovo import NoovoIE
 from .normalboots import NormalbootsIE
@@ -774,6 +847,7 @@ from .nrk import (
     NRKSkoleIE,
     NRKTVIE,
     NRKTVDirekteIE,
+    NRKRadioPodkastIE,
     NRKTVEpisodeIE,
     NRKTVEpisodesIE,
     NRKTVSeasonIE,
@@ -828,6 +902,11 @@ from .packtpub import (
     PacktPubIE,
     PacktPubCourseIE,
 )
+from .palcomp3 import (
+    PalcoMP3IE,
+    PalcoMP3ArtistIE,
+    PalcoMP3VideoIE,
+)
 from .pandoratv import PandoraTVIE
 from .parliamentliveuk import ParliamentLiveUKIE
 from .patreon import PatreonIE
@@ -861,6 +940,7 @@ from .platzi import (
 from .playfm import PlayFMIE
 from .playplustv import PlayPlusTVIE
 from .plays import PlaysTVIE
+from .playstuff import PlayStuffIE
 from .playtvak import PlaytvakIE
 from .playvid import PlayvidIE
 from .playwire import PlaywireIE
@@ -985,6 +1065,7 @@ from .safari import (
     SafariApiIE,
     SafariCourseIE,
 )
+from .samplefocus import SampleFocusIE
 from .sapo import SapoIE
 from .savefrom import SaveFromIE
 from .sbs import SBSIE
@@ -1017,6 +1098,11 @@ from .shared import (
     VivoIE,
 )
 from .showroomlive import ShowRoomLiveIE
+from .simplecast import (
+    SimplecastIE,
+    SimplecastEpisodeIE,
+    SimplecastPodcastIE,
+)
 from .sina import SinaIE
 from .sixplay import SixPlayIE
 from .skyit import (
@@ -1037,6 +1123,7 @@ from .skynewsarabia import (
 from .sky import (
     SkyNewsIE,
     SkySportsIE,
+    SkySportsNewsIE,
 )
 from .slideshare import SlideshareIE
 from .slideslive import SlidesLiveIE
@@ -1074,10 +1161,17 @@ from .spike import (
     BellatorIE,
     ParamountNetworkIE,
 )
-from .stitcher import StitcherIE
+from .stitcher import (
+    StitcherIE,
+    StitcherShowIE,
+)
 from .sport5 import Sport5IE
 from .sportbox import SportBoxIE
 from .sportdeutschland import SportDeutschlandIE
+from .spotify import (
+    SpotifyIE,
+    SpotifyShowIE,
+)
 from .spreaker import (
     SpreakerIE,
     SpreakerPageIE,
@@ -1093,6 +1187,11 @@ from .srgssr import (
 from .srmediathek import SRMediathekIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
+from .storyfire import (
+    StoryFireIE,
+    StoryFireUserIE,
+    StoryFireSeriesIE,
+)
 from .streamable import StreamableIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
@@ -1118,7 +1217,6 @@ from .tagesschau import (
     TagesschauIE,
 )
 from .tass import TassIE
-from .tastytrade import TastyTradeIE
 from .tbs import TBSIE
 from .tdslifeway import TDSLifewayIE
 from .teachable import (
@@ -1192,6 +1290,10 @@ from .toutv import TouTvIE
 from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
 from .trilulilu import TriluliluIE
+from .trovo import (
+    TrovoIE,
+    TrovoVodIE,
+)
 from .trunews import TruNewsIE
 from .trutv import TruTVIE
 from .tube8 import Tube8IE
@@ -1210,6 +1312,7 @@ from .tv2 import (
     TV2IE,
     TV2ArticleIE,
     KatsomoIE,
+    MTVUutisetArticleIE,
 )
 from .tv2dk import (
     TV2DKIE,
@@ -1218,6 +1321,10 @@ from .tv2dk import (
 from .tv2hu import TV2HuIE
 from .tv4 import TV4IE
 from .tv5mondeplus import TV5MondePlusIE
+from .tv5unis import (
+    TV5UnisVideoIE,
+    TV5UnisIE,
+)
 from .tva import (
     TVAIE,
     QubIE,
@@ -1344,7 +1451,6 @@ from .vidme import (
     VidmeUserIE,
     VidmeUserLikesIE,
 )
-from .vidzi import VidziIE
 from .vier import VierIE, VierVideosIE
 from .viewlift import (
     ViewLiftIE,
@@ -1404,10 +1510,14 @@ from .vrv import (
     VRVSeriesIE,
 )
 from .vshare import VShareIE
+from .vtm import VTMIE
 from .medialaan import MedialaanIE
 from .vube import VubeIE
 from .vuclip import VuClipIE
-from .vvvvid import VVVVIDIE
+from .vvvvid import (
+    VVVVIDIE,
+    VVVVIDShowIE,
+)
 from .vyborymos import VyboryMosIE
 from .vzaar import VzaarIE
 from .wakanim import WakanimIE
@@ -1438,7 +1548,10 @@ from .weibo import (
     WeiboMobileIE
 )
 from .weiqitv import WeiqiTVIE
-from .wistia import WistiaIE
+from .wistia import (
+    WistiaIE,
+    WistiaPlaylistIE,
+)
 from .worldstarhiphop import WorldStarHipHopIE
 from .wsj import (
     WSJIE,
@@ -1512,7 +1625,7 @@ from .youtube import (
     YoutubeRecommendedIE,
     YoutubeSearchDateIE,
     YoutubeSearchIE,
-    #YoutubeSearchURLIE,
+    YoutubeSearchURLIE,
     YoutubeSubscriptionsIE,
     YoutubeTruncatedIDIE,
     YoutubeTruncatedURLIE,
@@ -1521,7 +1634,6 @@ from .youtube import (
     YoutubeWatchLaterIE,
 )
 from .zapiks import ZapiksIE
-from .zaq1 import Zaq1IE
 from .zattoo import (
     BBVTVIE,
     EinsUndEinsTVIE,
@@ -1542,5 +1654,10 @@ from .zattoo import (
     ZattooLiveIE,
 )
 from .zdf import ZDFIE, ZDFChannelIE
-from .zingmp3 import ZingMp3IE
+from .zhihu import ZhihuIE
+from .zingmp3 import (
+    ZingMp3IE,
+    ZingMp3AlbumIE,
+)
+from .zoom import ZoomIE
 from .zype import ZypeIE
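Every class imported in the block above is what actually registers an extractor: youtube_dl/extractor/extractors.py is star-imported to build the list returned by gen_extractors(), and each extractor claims URLs through its _VALID_URL regex via suitable(). A minimal sketch of that lookup, assuming youtube_dl is importable; the BFMTV URL below is a made-up example, not one of the project's test cases:

# Sketch: resolve which extractor (IE) claims a URL once its class is
# imported in youtube_dl/extractor/extractors.py.
from youtube_dl.extractor import gen_extractors


def find_ie_name(url):
    for ie in gen_extractors():
        # suitable() checks the extractor's _VALID_URL regex; skip the
        # catch-all GenericIE so specific extractors win.
        if ie.suitable(url) and ie.IE_NAME != 'generic':
            return ie.IE_NAME
    return 'generic'


print(find_ie_name('https://www.bfmtv.com/politique/exemple-article-AN-202101010001.html'))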
youtube_dl/extractor/facebook.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import json
 import re
 import socket

@@ -8,6 +9,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
     compat_http_client,
+    compat_str,
     compat_urllib_error,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
@@ -47,7 +49,8 @@ class FacebookIE(InfoExtractor):
                             )\?(?:.*?)(?:v|video_id|story_fbid)=|
                             [^/]+/videos/(?:[^/]+/)?|
                             [^/]+/posts/|
-                            groups/[^/]+/permalink/
+                            groups/[^/]+/permalink/|
+                            watchparty/
                         )|
                     facebook:
                 )
@@ -280,8 +283,18 @@ class FacebookIE(InfoExtractor):
         # data.video.creation_story.attachments[].media
         'url': 'https://www.facebook.com/watch/live/?v=1823658634322275',
         'only_matching': True,
+    }, {
+        'url': 'https://www.facebook.com/watchparty/211641140192478',
+        'info_dict': {
+            'id': '211641140192478',
+        },
+        'playlist_count': 1,
+        'skip': 'Requires logging in',
     }]
     _SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
+    _api_config = {
+        'graphURI': '/api/graphql/'
+    }

     @staticmethod
     def _extract_urls(webpage):
@@ -405,6 +418,17 @@ class FacebookIE(InfoExtractor):

             self._sort_formats(formats)

+        def extract_relay_data(_filter):
+            return self._parse_json(self._search_regex(
+                r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter,
+                webpage, 'replay data', default='{}'), video_id, fatal=False) or {}
+
+        def extract_relay_prefetched_data(_filter):
+            replay_data = extract_relay_data(_filter)
+            for require in (replay_data.get('require') or []):
+                if require[0] == 'RelayPrefetchedStreamCache':
+                    return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
+
         if not video_data:
             server_js_data = self._parse_json(self._search_regex([
                 r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
@@ -413,87 +437,83 @@ class FacebookIE(InfoExtractor):
             video_data = extract_from_jsmods_instances(server_js_data)

         if not video_data:
-            graphql_data = self._parse_json(self._search_regex(
-                r'handleWithCustomApplyEach\([^,]+,\s*({.*?"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+".*?})\);',
-                webpage, 'graphql data', default='{}'), video_id, fatal=False) or {}
-            for require in (graphql_data.get('require') or []):
-                if require[0] == 'RelayPrefetchedStreamCache':
-                    entries = []
+            data = extract_relay_prefetched_data(
+                r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"')
+            if data:
+                entries = []

                 def parse_graphql_video(video):
                     formats = []
                     q = qualities(['sd', 'hd'])
                     for (suffix, format_id) in [('', 'sd'), ('_quality_hd', 'hd')]:
                         playable_url = video.get('playable_url' + suffix)
                         if not playable_url:
                             continue
                         formats.append({
                             'format_id': format_id,
                             'quality': q(format_id),
                             'url': playable_url,
                         })
                     extract_dash_manifest(video, formats)
                     process_formats(formats)
                     v_id = video.get('videoId') or video.get('id') or video_id
                     info = {
                         'id': v_id,
                         'formats': formats,
                         'thumbnail': try_get(video, lambda x: x['thumbnailImage']['uri']),
                         'uploader_id': try_get(video, lambda x: x['owner']['id']),
                         'timestamp': int_or_none(video.get('publish_time')),
                         'duration': float_or_none(video.get('playable_duration_in_ms'), 1000),
                     }
                     description = try_get(video, lambda x: x['savable_description']['text'])
                     title = video.get('name')
                     if title:
                         info.update({
                             'title': title,
                             'description': description,
                         })
                     else:
                         info['title'] = description or 'Facebook video #%s' % v_id
                     entries.append(info)

                 def parse_attachment(attachment, key='media'):
                     media = attachment.get(key) or {}
                     if media.get('__typename') == 'Video':
                         return parse_graphql_video(media)

-                    data = try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
+                nodes = data.get('nodes') or []
+                node = data.get('node') or {}
+                if not nodes and node:
+                    nodes.append(node)
+                for node in nodes:
+                    story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {}
+                    attachments = try_get(story, [
+                        lambda x: x['attached_story']['attachments'],
+                        lambda x: x['attachments']
+                    ], list) or []
+                    for attachment in attachments:
+                        attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict)
+                        ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
+                        for n in ns:
+                            parse_attachment(n)
+                        parse_attachment(attachment)

-                    nodes = data.get('nodes') or []
-                    node = data.get('node') or {}
-                    if not nodes and node:
-                        nodes.append(node)
-                    for node in nodes:
-                        story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {}
-                        attachments = try_get(story, [
-                            lambda x: x['attached_story']['attachments'],
-                            lambda x: x['attachments']
-                        ], list) or []
-                        for attachment in attachments:
-                            attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict)
-                            ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
-                            for n in ns:
-                                parse_attachment(n)
-                            parse_attachment(attachment)
+                edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or []
+                for edge in edges:
+                    parse_attachment(edge, key='node')

-                    edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or []
-                    for edge in edges:
-                        parse_attachment(edge, key='node')
+                video = data.get('video') or {}
+                if video:
+                    attachments = try_get(video, [
+                        lambda x: x['story']['attachments'],
+                        lambda x: x['creation_story']['attachments']
+                    ], list) or []
+                    for attachment in attachments:
+                        parse_attachment(attachment)
+                    if not entries:
+                        parse_graphql_video(video)

-                    video = data.get('video') or {}
-                    if video:
-                        attachments = try_get(video, [
-                            lambda x: x['story']['attachments'],
-                            lambda x: x['creation_story']['attachments']
-                        ], list) or []
-                        for attachment in attachments:
-                            parse_attachment(attachment)
-                        if not entries:
-                            parse_graphql_video(video)
-
-                    return self.playlist_result(entries, video_id)
+                return self.playlist_result(entries, video_id)

         if not video_data:
             m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
@@ -501,9 +521,49 @@ class FacebookIE(InfoExtractor):
                 raise ExtractorError(
                     'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                     expected=True)
-            elif '>You must log in to continue' in webpage:
+            elif any(p in webpage for p in (
+                    '>You must log in to continue',
+                    'id="login_form"',
+                    'id="loginbutton"')):
                 self.raise_login_required()

+        if not video_data and '/watchparty/' in url:
+            post_data = {
+                'doc_id': 3731964053542869,
+                'variables': json.dumps({
+                    'livingRoomID': video_id,
+                }),
+            }
+
+            prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{')
+            if prefetched_data:
+                lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict)
+                if lsd:
+                    post_data[lsd['name']] = lsd['value']
+
+            relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,')
+            for define in (relay_data.get('define') or []):
+                if define[0] == 'RelayAPIConfigDefaults':
+                    self._api_config = define[2]
+
+            living_room = self._download_json(
+                urljoin(url, self._api_config['graphURI']), video_id,
+                data=urlencode_postdata(post_data))['data']['living_room']
+
+            entries = []
+            for edge in (try_get(living_room, lambda x: x['recap']['watched_content']['edges']) or []):
+                video = try_get(edge, lambda x: x['node']['video']) or {}
+                v_id = video.get('id')
+                if not v_id:
+                    continue
+                v_id = compat_str(v_id)
+                entries.append(self.url_result(
+                    self._VIDEO_PAGE_TEMPLATE % v_id,
+                    self.ie_key(), v_id, video.get('name')))
+
+            return self.playlist_result(entries, video_id)
+
+        if not video_data:
             # Video info not in first request, do a secondary request using
             # tahoe player specific URL
             tahoe_data = self._download_webpage(
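The extract_relay_data / extract_relay_prefetched_data helpers added above pull the GraphQL payload that Facebook embeds in a handleWithCustomApplyEach(...) call and then walk the RelayPrefetchedStreamCache entry. A rough, self-contained sketch of that pattern, assuming a fabricated miniature payload (the extractor itself goes through self._search_regex/self._parse_json over much larger pages):

# Toy illustration of the relay-data extraction pattern used in facebook.py.
# The "webpage" string below is a fabricated miniature payload.
import json
import re

webpage = ('<script>handleWithCustomApplyEach(ScheduledApplyEach, '
           '{"require": [["RelayPrefetchedStreamCache", null, null, [null, '
           '{"__bbox": {"result": {"data": {"video": {"id": "123"}}}}}]]]});</script>')


def extract_relay_data(_filter):
    m = re.search(
        r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter, webpage)
    return json.loads(m.group(1)) if m else {}


def extract_relay_prefetched_data(_filter):
    for require in extract_relay_data(_filter).get('require') or []:
        if require[0] == 'RelayPrefetchedStreamCache':
            # require[3][1] holds the prefetched GraphQL result
            return require[3][1]['__bbox']['result']['data']
    return {}


print(extract_relay_prefetched_data(r'"video"'))  # {'video': {'id': '123'}}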
Some files were not shown because too many files have changed in this diff.