mirror of
https://github.com/ytdl-org/youtube-dl
synced 2025-10-25 09:38:38 +09:00
Compare commits
3681 Commits
2015.06.25
...
2016.05.30
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
abbb2938fa | ||
|
|
f657b1a5f2 | ||
|
|
86a52881c6 | ||
|
|
8267423652 | ||
|
|
917a3196f8 | ||
|
|
56bd028a0f | ||
|
|
681b923b5c | ||
|
|
9ed6d8c6c5 | ||
|
|
f3fb420b82 | ||
|
|
165e3561e9 | ||
|
|
27f17c0eab | ||
|
|
44c8892369 | ||
|
|
f574103d7c | ||
|
|
6d138e98e3 | ||
|
|
2a329110b9 | ||
|
|
2bee7b25f3 | ||
|
|
92cf872a48 | ||
|
|
6461f2b7ec | ||
|
|
807cf7b07f | ||
|
|
de7d76af52 | ||
|
|
11c70deba7 | ||
|
|
f36532404d | ||
|
|
77b8b4e696 | ||
|
|
2615fa7584 | ||
|
|
fac2af3c51 | ||
|
|
6f8cb24219 | ||
|
|
448bb5f333 | ||
|
|
293c255688 | ||
|
|
ac88d2316e | ||
|
|
5950cb1d6d | ||
|
|
761052db92 | ||
|
|
240b60453e | ||
|
|
85b0fe7d64 | ||
|
|
0a5685b26f | ||
|
|
6f748df43f | ||
|
|
b410cb83d4 | ||
|
|
da9d82840a | ||
|
|
4ee0b8afdb | ||
|
|
1de32771e1 | ||
|
|
688c634b7d | ||
|
|
0d6ee97508 | ||
|
|
6b43132ce9 | ||
|
|
a4690b3244 | ||
|
|
444417edb5 | ||
|
|
277c7465f5 | ||
|
|
25bcd3550e | ||
|
|
a4760d204f | ||
|
|
e8593f346a | ||
|
|
05b651e3a5 | ||
|
|
42a7439717 | ||
|
|
b1e9ebd080 | ||
|
|
0c50eeb987 | ||
|
|
4b464a6a78 | ||
|
|
5db9df622f | ||
|
|
5181759c0d | ||
|
|
e54373204a | ||
|
|
102810ef04 | ||
|
|
78d3b3e213 | ||
|
|
7a46542f97 | ||
|
|
eb7941e3e6 | ||
|
|
db3b8b2103 | ||
|
|
c5f5155100 | ||
|
|
4a12077855 | ||
|
|
a4a7c44bd3 | ||
|
|
70346165fe | ||
|
|
c776b99691 | ||
|
|
e9297256d4 | ||
|
|
e5871c672b | ||
|
|
9b06b0fb92 | ||
|
|
4f3a25c2b4 | ||
|
|
21a19aa94d | ||
|
|
c6b9cf05e1 | ||
|
|
4d8819d249 | ||
|
|
898f4b49cc | ||
|
|
0150a00f33 | ||
|
|
c8831015f4 | ||
|
|
92d221ad48 | ||
|
|
0db9a05f88 | ||
|
|
e03b35b8f9 | ||
|
|
d2fee3c99e | ||
|
|
598869afb1 | ||
|
|
7e642e4fd6 | ||
|
|
c8cc3745fb | ||
|
|
4c718d3c50 | ||
|
|
115c65793a | ||
|
|
661d46b28f | ||
|
|
5ce3d5bd1b | ||
|
|
612b5f403e | ||
|
|
9f54e692d2 | ||
|
|
7b2fcbfd4e | ||
|
|
16da9bbc29 | ||
|
|
c8602b2f9b | ||
|
|
b219f5e51b | ||
|
|
1846e9ade0 | ||
|
|
6756602be6 | ||
|
|
6c114b1210 | ||
|
|
7ded6545ed | ||
|
|
aa5957ac49 | ||
|
|
64413f7563 | ||
|
|
45f160a43c | ||
|
|
36ca2c55db | ||
|
|
f0c96af9cb | ||
|
|
31a70191e7 | ||
|
|
ad96b4c8f5 | ||
|
|
043dc9d36f | ||
|
|
52f7c75cff | ||
|
|
f6e588afc0 | ||
|
|
a001296703 | ||
|
|
2cbd8c6781 | ||
|
|
8585dc4cdc | ||
|
|
dd81769c62 | ||
|
|
46bc9b7d7c | ||
|
|
b78531a36a | ||
|
|
11e6a0b641 | ||
|
|
15cda1ef77 | ||
|
|
055f0d3d06 | ||
|
|
cdd94c2eae | ||
|
|
36755d9d69 | ||
|
|
f7199423e5 | ||
|
|
a0a81918f1 | ||
|
|
5572d598a5 | ||
|
|
cec9727c7f | ||
|
|
79298173c5 | ||
|
|
69c9cc2716 | ||
|
|
ed56f26039 | ||
|
|
6f41b2bcf1 | ||
|
|
cda6d47aad | ||
|
|
5d39176f6d | ||
|
|
5c86bfe70f | ||
|
|
364cf465dd | ||
|
|
ca950f49e9 | ||
|
|
89ac4a19e6 | ||
|
|
640eea0a0c | ||
|
|
bd1e484448 | ||
|
|
a834622b89 | ||
|
|
707bb426b1 | ||
|
|
66e7ace17a | ||
|
|
791ff52f75 | ||
|
|
98d560f205 | ||
|
|
afcc317800 | ||
|
|
b5abf86148 | ||
|
|
134c6ea856 | ||
|
|
0730be9022 | ||
|
|
96c2e3e909 | ||
|
|
f196508f7b | ||
|
|
cc1028aa6d | ||
|
|
ad55e10165 | ||
|
|
18cf6381f6 | ||
|
|
cdf32ff15d | ||
|
|
99d79b8692 | ||
|
|
b9e7bc55da | ||
|
|
d8d540cf0d | ||
|
|
0df79d552a | ||
|
|
0db3a66162 | ||
|
|
7581bfc958 | ||
|
|
f388f616c1 | ||
|
|
a3fa6024d6 | ||
|
|
1b405bb47d | ||
|
|
7e8ddca1bb | ||
|
|
778a1ccca7 | ||
|
|
4540515cb3 | ||
|
|
e0741fd449 | ||
|
|
e73b9c65e2 | ||
|
|
702ccf2dc0 | ||
|
|
28b4f73620 | ||
|
|
c2876afafe | ||
|
|
6ddb4888d2 | ||
|
|
fa5cb8d021 | ||
|
|
e21f17fc86 | ||
|
|
edaa23f822 | ||
|
|
d5ae6bb501 | ||
|
|
51fb4995a5 | ||
|
|
9e9cd7248d | ||
|
|
72f3289ac4 | ||
|
|
71aff18809 | ||
|
|
dab0daeeb0 | ||
|
|
4350b74545 | ||
|
|
2937590e8b | ||
|
|
fad7bbec3a | ||
|
|
e62d9c5caa | ||
|
|
20cfdcc910 | ||
|
|
1292638754 | ||
|
|
fe40f9eef2 | ||
|
|
6104cc2985 | ||
|
|
c15c47d19b | ||
|
|
965fefdcd8 | ||
|
|
3951e7eb93 | ||
|
|
f1f6f5aa5e | ||
|
|
eb785b856f | ||
|
|
c52f4efaee | ||
|
|
f23a92a0ce | ||
|
|
3b01a9fbb6 | ||
|
|
9c072d38c6 | ||
|
|
3e169233da | ||
|
|
f5436c5d9e | ||
|
|
5c24873a9e | ||
|
|
00c21c225d | ||
|
|
d013b26719 | ||
|
|
e2eca6f65e | ||
|
|
a0904c5d80 | ||
|
|
cb1fa58813 | ||
|
|
3fd6332c05 | ||
|
|
401d147893 | ||
|
|
e2ee97dcd5 | ||
|
|
f745403b5b | ||
|
|
3e80e6f40d | ||
|
|
25cb7a0eeb | ||
|
|
abc97b5eda | ||
|
|
04e88ca2ca | ||
|
|
6f59aa934b | ||
|
|
109db8ea64 | ||
|
|
915620fd68 | ||
|
|
ac12e888f9 | ||
|
|
b1c6a5bac8 | ||
|
|
7d08f6073d | ||
|
|
758a059241 | ||
|
|
4f8c56eb4e | ||
|
|
9da526aae7 | ||
|
|
75b81df3af | ||
|
|
aabdc83d6e | ||
|
|
2a48e6f01a | ||
|
|
203a3c0e6a | ||
|
|
d36724cca4 | ||
|
|
15fc0658f7 | ||
|
|
e960c3c223 | ||
|
|
bc7e77a04b | ||
|
|
964f49336f | ||
|
|
57d8e32a3e | ||
|
|
4174552391 | ||
|
|
80bc4106af | ||
|
|
7759be38da | ||
|
|
a0a309b973 | ||
|
|
c587cbb793 | ||
|
|
6c52a86f54 | ||
|
|
8a92e51c60 | ||
|
|
f0e14fdd43 | ||
|
|
df5f4e8888 | ||
|
|
7960b0563b | ||
|
|
5c9ced9504 | ||
|
|
31c4448f6e | ||
|
|
79a2e94e79 | ||
|
|
686cc89634 | ||
|
|
9508738f9a | ||
|
|
78a3ff33ab | ||
|
|
881dbc86c4 | ||
|
|
8e7d004888 | ||
|
|
9618c44824 | ||
|
|
516ea41a7d | ||
|
|
e2bd301ce7 | ||
|
|
0c9d288ba0 | ||
|
|
e0da32df6e | ||
|
|
174aba3223 | ||
|
|
0d66bd0eab | ||
|
|
4bd143a3a0 | ||
|
|
6f27bf1c74 | ||
|
|
68bb2fef95 | ||
|
|
854cc54bc1 | ||
|
|
651ad35ce0 | ||
|
|
6a0f9a24d0 | ||
|
|
9cf79e8f4b | ||
|
|
2844b09336 | ||
|
|
1a2b377cc2 | ||
|
|
4c1b2e5c0e | ||
|
|
9e1b96ae40 | ||
|
|
fc35cd9e0c | ||
|
|
339fe7228a | ||
|
|
ea7e7fecbd | ||
|
|
d00b93d58c | ||
|
|
93f7a31bf3 | ||
|
|
33a1ec950c | ||
|
|
4e0c0c1508 | ||
|
|
89c0dc9a5f | ||
|
|
f628d800fb | ||
|
|
11fa3d7f99 | ||
|
|
d41ee7b774 | ||
|
|
e0e9bbb0e9 | ||
|
|
7691184a31 | ||
|
|
35cd2f4c25 | ||
|
|
350d7963db | ||
|
|
cbc032c8b7 | ||
|
|
69c4cde4ba | ||
|
|
ca278a182b | ||
|
|
373e1230e4 | ||
|
|
cd63d091ce | ||
|
|
0571ffda7d | ||
|
|
5556047465 | ||
|
|
65a3bfb379 | ||
|
|
cef3f3011f | ||
|
|
e9c6cdf4a1 | ||
|
|
00a17a9e12 | ||
|
|
8312b1a3d1 | ||
|
|
6ff4469528 | ||
|
|
68835d687a | ||
|
|
9d186afac8 | ||
|
|
151d98130b | ||
|
|
b24d6336a7 | ||
|
|
065216d94f | ||
|
|
67167920db | ||
|
|
14638e2915 | ||
|
|
1910077ed7 | ||
|
|
5819edef03 | ||
|
|
f5535ed0e3 | ||
|
|
31ff3c074e | ||
|
|
72670c39de | ||
|
|
683d892bf9 | ||
|
|
497971cd4a | ||
|
|
e757fb3d05 | ||
|
|
0ba9e3ca22 | ||
|
|
4b53762914 | ||
|
|
eebe6b382e | ||
|
|
0cbcbdd89d | ||
|
|
7f776fa4b5 | ||
|
|
eb5ad31ce1 | ||
|
|
a5941305b6 | ||
|
|
f8dddaf456 | ||
|
|
618c71dc64 | ||
|
|
52af8f222b | ||
|
|
3cc8649c9d | ||
|
|
dcf094d626 | ||
|
|
5b5d7cc11e | ||
|
|
2ac2cbc0a3 | ||
|
|
a7e03861e8 | ||
|
|
046ea04a7d | ||
|
|
7464360379 | ||
|
|
175c2e9ec3 | ||
|
|
f1f879098a | ||
|
|
c9fd530670 | ||
|
|
749b0046a8 | ||
|
|
e3de3d6f2f | ||
|
|
ad58942d57 | ||
|
|
4645432d7a | ||
|
|
6bdc2d5358 | ||
|
|
2beff95da5 | ||
|
|
abc1723edd | ||
|
|
b248e6485b | ||
|
|
d6712378e7 | ||
|
|
fb72ec58ae | ||
|
|
c83a352227 | ||
|
|
e9063b5de9 | ||
|
|
594b0c4c69 | ||
|
|
eb9ee19422 | ||
|
|
a1394b820d | ||
|
|
aa9dc24f5a | ||
|
|
51762e1a31 | ||
|
|
8b38f2ac40 | ||
|
|
a82398bd72 | ||
|
|
c14dc00df3 | ||
|
|
03dd60ca41 | ||
|
|
0738187f9b | ||
|
|
a956cb6306 | ||
|
|
a8062eabcd | ||
|
|
2a7dee8cc5 | ||
|
|
d9ed362116 | ||
|
|
4f54958097 | ||
|
|
2a7c38831c | ||
|
|
949b6497cc | ||
|
|
2c21152ca7 | ||
|
|
fda9a1ca9e | ||
|
|
864d5e7231 | ||
|
|
5448b781f6 | ||
|
|
e239413fbc | ||
|
|
fd0ff8bad8 | ||
|
|
397ec446f3 | ||
|
|
29a7e8f6f8 | ||
|
|
eb01e97e10 | ||
|
|
cb7d4d0efd | ||
|
|
c80037918b | ||
|
|
237a41108a | ||
|
|
e962ae15d3 | ||
|
|
7c36ea7d54 | ||
|
|
9260cf1d97 | ||
|
|
bdbb8530c7 | ||
|
|
09a9fadb84 | ||
|
|
bf09af3acb | ||
|
|
88296ac326 | ||
|
|
870d525848 | ||
|
|
6577112890 | ||
|
|
1988647dda | ||
|
|
a292cba256 | ||
|
|
982e518a96 | ||
|
|
748e730099 | ||
|
|
b6c0d4f431 | ||
|
|
acaff49575 | ||
|
|
1da19488f9 | ||
|
|
442c4d361f | ||
|
|
ec59d657e7 | ||
|
|
99ef96f84c | ||
|
|
4dccea8ad0 | ||
|
|
2c0d9c6217 | ||
|
|
12a5134596 | ||
|
|
16e633a5d7 | ||
|
|
494ab6db73 | ||
|
|
107701fcfc | ||
|
|
f77970765a | ||
|
|
81215d5652 | ||
|
|
241a318f27 | ||
|
|
4fdf082375 | ||
|
|
1b6182d8f7 | ||
|
|
7bab22a402 | ||
|
|
0f97fb4d00 | ||
|
|
b1cf58f48f | ||
|
|
3014b0ae83 | ||
|
|
b9f2fdd37f | ||
|
|
bbb3f730bb | ||
|
|
d868f43c58 | ||
|
|
21525bb8ca | ||
|
|
d8f103159f | ||
|
|
663ee5f0a9 | ||
|
|
b6b950bf58 | ||
|
|
11e60fcad8 | ||
|
|
c23533a100 | ||
|
|
0dafea02e6 | ||
|
|
5d6360c3b7 | ||
|
|
5e5c30c3fd | ||
|
|
9154c87fc4 | ||
|
|
ef0e4e7bc0 | ||
|
|
67d46a3f90 | ||
|
|
bec47a0748 | ||
|
|
36b7d9dbfa | ||
|
|
8c65e4a527 | ||
|
|
6ad2ef8b7c | ||
|
|
00b426d66d | ||
|
|
0de968b584 | ||
|
|
0841d5013c | ||
|
|
a71fca8577 | ||
|
|
ee94e7e66d | ||
|
|
759e37c9e6 | ||
|
|
ae65567102 | ||
|
|
c394b4f4cb | ||
|
|
260c7036ba | ||
|
|
f74197a074 | ||
|
|
f3a58d46bf | ||
|
|
b6612c9b11 | ||
|
|
7e176effb2 | ||
|
|
4a252cc2d2 | ||
|
|
f0ec61b525 | ||
|
|
66d40ae3a5 | ||
|
|
e6da9240d4 | ||
|
|
dd91dfcd67 | ||
|
|
c773082692 | ||
|
|
9c250931f5 | ||
|
|
56f1750049 | ||
|
|
f2159c9815 | ||
|
|
b0cf2e7c1b | ||
|
|
74b47d00c3 | ||
|
|
8cb57bab8e | ||
|
|
e1bf277e19 | ||
|
|
ce599d5a7e | ||
|
|
9e28538726 | ||
|
|
404284132c | ||
|
|
5565be9dd9 | ||
|
|
b3a9474ad1 | ||
|
|
86475d59b1 | ||
|
|
73d93f948e | ||
|
|
f5d8743e0a | ||
|
|
d1c4e4ba15 | ||
|
|
f141fefab7 | ||
|
|
8334637f4a | ||
|
|
b0ba11cc64 | ||
|
|
b8f67449ec | ||
|
|
75af5d59ae | ||
|
|
b969d12490 | ||
|
|
6d67169509 | ||
|
|
dcaf00fb3e | ||
|
|
f896e1ccef | ||
|
|
c96eca426b | ||
|
|
466a614537 | ||
|
|
ffa2cecf72 | ||
|
|
a837416025 | ||
|
|
c9d448876f | ||
|
|
8865b8abfd | ||
|
|
c77a0c01cb | ||
|
|
12355ac473 | ||
|
|
49f523ca50 | ||
|
|
4a903b93a9 | ||
|
|
13267a2be3 | ||
|
|
134c207e3f | ||
|
|
0f56bd2178 | ||
|
|
dfbc7f7f3f | ||
|
|
7d58ea7c5b | ||
|
|
452908b257 | ||
|
|
5899e988d5 | ||
|
|
4a121d29bb | ||
|
|
7ebc36900d | ||
|
|
d7eb052fa2 | ||
|
|
a6d6722c8f | ||
|
|
66fa495868 | ||
|
|
443285aabe | ||
|
|
de728757ad | ||
|
|
f44c276842 | ||
|
|
a1fa60a934 | ||
|
|
49caf3307f | ||
|
|
6a801f4470 | ||
|
|
61dd350a04 | ||
|
|
eb9c3edd5e | ||
|
|
95153a960d | ||
|
|
6c4c7539f2 | ||
|
|
c991106706 | ||
|
|
dae2a058de | ||
|
|
c05025fdd7 | ||
|
|
bfe96d7bea | ||
|
|
ab481b48e5 | ||
|
|
92c7f3157a | ||
|
|
cacd996662 | ||
|
|
bffb245a48 | ||
|
|
680efb6723 | ||
|
|
5a9858bfa9 | ||
|
|
8a5dc1c1e1 | ||
|
|
e0986e31cf | ||
|
|
6b97ca96fc | ||
|
|
c1ce6acdd7 | ||
|
|
0d778b1db9 | ||
|
|
779822d945 | ||
|
|
1b3d5e05a8 | ||
|
|
e52d7f85f2 | ||
|
|
568d2f78d6 | ||
|
|
2f2fcf1a33 | ||
|
|
bacec0397f | ||
|
|
3c6c7e7d7e | ||
|
|
fb38aa8b53 | ||
|
|
18da24634c | ||
|
|
a134426d61 | ||
|
|
a64c0c9b06 | ||
|
|
56019444cb | ||
|
|
a1ff3cd5f9 | ||
|
|
9a32e80477 | ||
|
|
536a55dabd | ||
|
|
ed6fb8b804 | ||
|
|
3afef2e3fc | ||
|
|
e90d175436 | ||
|
|
7a93ab5f3f | ||
|
|
c41cf65d4a | ||
|
|
ec4a4c6fcc | ||
|
|
be0c7009fb | ||
|
|
92d5477d84 | ||
|
|
8790249c68 | ||
|
|
416930d450 | ||
|
|
65150b41bb | ||
|
|
e42f413716 | ||
|
|
40a056d85d | ||
|
|
e7d77efb9d | ||
|
|
995cf05c96 | ||
|
|
5bf28d7864 | ||
|
|
8c7d6e8e22 | ||
|
|
6d4fc66bfc | ||
|
|
23576edbfc | ||
|
|
4d4cd35f48 | ||
|
|
3aac9b2fb1 | ||
|
|
e47d19e991 | ||
|
|
41f5492fbc | ||
|
|
2defa7d75a | ||
|
|
bbc26c8a01 | ||
|
|
b507cc925b | ||
|
|
db8ee7ec05 | ||
|
|
08136dc138 | ||
|
|
fe7ef95e91 | ||
|
|
5f705baf5e | ||
|
|
0750b2491f | ||
|
|
df634be2ed | ||
|
|
6d628fafca | ||
|
|
0f28777f58 | ||
|
|
329c1eae54 | ||
|
|
9aaaf8e8e8 | ||
|
|
04819db58e | ||
|
|
79ba9140dc | ||
|
|
75d572e9fb | ||
|
|
791d6aaecc | ||
|
|
81de73e5b4 | ||
|
|
83cedc1cf2 | ||
|
|
244cd04237 | ||
|
|
fbdaced256 | ||
|
|
a3373823e1 | ||
|
|
03caa463e7 | ||
|
|
3f64379eda | ||
|
|
3e0c3d14d9 | ||
|
|
d8873d4def | ||
|
|
db1c969da5 | ||
|
|
1e02bc7ba2 | ||
|
|
63c55e9f22 | ||
|
|
f9b1529af8 | ||
|
|
961fc024d2 | ||
|
|
b53a06e3b9 | ||
|
|
4ecc1fc638 | ||
|
|
5b012dfce8 | ||
|
|
8369942773 | ||
|
|
86f3b66cec | ||
|
|
6bb4600717 | ||
|
|
41d06b0424 | ||
|
|
15d260ebaa | ||
|
|
ed0291d153 | ||
|
|
81da8cbc45 | ||
|
|
5299bc3f91 | ||
|
|
c9c39c22c5 | ||
|
|
d84b48e3f1 | ||
|
|
dd17041c82 | ||
|
|
fea7295b14 | ||
|
|
9cf01f7f30 | ||
|
|
ce548296fe | ||
|
|
c02ec7d430 | ||
|
|
6b820a2376 | ||
|
|
e621a344e6 | ||
|
|
3ae6f8fec1 | ||
|
|
597d52fadb | ||
|
|
afca767d19 | ||
|
|
6e359a1534 | ||
|
|
607619bc90 | ||
|
|
0b7bfc9422 | ||
|
|
7168a6c874 | ||
|
|
034947dd1e | ||
|
|
3c0de33ad7 | ||
|
|
89924f8230 | ||
|
|
a39c68f7e5 | ||
|
|
4a5a67ca25 | ||
|
|
8751da85a7 | ||
|
|
3bf1df51fd | ||
|
|
3842a3e652 | ||
|
|
7710bdf4e8 | ||
|
|
8d9dd3c34b | ||
|
|
33f3040a3e | ||
|
|
03442072c0 | ||
|
|
c8b13fec02 | ||
|
|
87d105ac6c | ||
|
|
3454139576 | ||
|
|
3a23bae9cc | ||
|
|
8f9a477e7f | ||
|
|
a1cf3e38a3 | ||
|
|
a122e7080b | ||
|
|
b22ca76204 | ||
|
|
f7df343b4a | ||
|
|
19dbaeece3 | ||
|
|
395fd4b08a | ||
|
|
8018028d0f | ||
|
|
00322ad4fd | ||
|
|
4cf3489c6e | ||
|
|
b24ab3e341 | ||
|
|
af4116f4f0 | ||
|
|
f973e5d54e | ||
|
|
62f55aa68a | ||
|
|
02d7634d24 | ||
|
|
48dce58ca9 | ||
|
|
efcba804f6 | ||
|
|
6dee688e6d | ||
|
|
eedb7ba536 | ||
|
|
dcf77cf1a7 | ||
|
|
17bcc626bf | ||
|
|
b5a5bbf376 | ||
|
|
e68d3a010f | ||
|
|
d10fe8358c | ||
|
|
d6c340cae5 | ||
|
|
5964b598ff | ||
|
|
62cdb96f51 | ||
|
|
e289d6d62c | ||
|
|
6e6bc8dae5 | ||
|
|
15707c7e02 | ||
|
|
2156f16ca7 | ||
|
|
4db441de72 | ||
|
|
0be8314dc8 | ||
|
|
d7f62b049a | ||
|
|
3bb3356812 | ||
|
|
3f15fec1d1 | ||
|
|
98e68806fb | ||
|
|
e031768666 | ||
|
|
5eb7db4ee9 | ||
|
|
f0e83681d9 | ||
|
|
ff9d5d0938 | ||
|
|
d041a73674 | ||
|
|
f07e276a04 | ||
|
|
993271da0a | ||
|
|
369e7e3ff0 | ||
|
|
5767b4eeae | ||
|
|
622d19160b | ||
|
|
32d88410eb | ||
|
|
5a51775a58 | ||
|
|
87696e78d7 | ||
|
|
c4096e8aea | ||
|
|
fc27ea9464 | ||
|
|
088e1aac59 | ||
|
|
81f36eba88 | ||
|
|
2d60465e44 | ||
|
|
4333d56494 | ||
|
|
882c699296 | ||
|
|
efbed08dc2 | ||
|
|
7da2c87119 | ||
|
|
c6ca11f1b3 | ||
|
|
2beeb286e1 | ||
|
|
cc7397b04d | ||
|
|
bc5d16b302 | ||
|
|
85c637b737 | ||
|
|
5c69f7a479 | ||
|
|
ff5873b72d | ||
|
|
065c4b27bf | ||
|
|
1600ed1ff9 | ||
|
|
5886b38d73 | ||
|
|
0cef27ad25 | ||
|
|
12af4beb3e | ||
|
|
9016d76f71 | ||
|
|
3c5d183c19 | ||
|
|
3e8bb9a972 | ||
|
|
daef04a4e7 | ||
|
|
7caae128a7 | ||
|
|
2648918c81 | ||
|
|
920d318d3c | ||
|
|
9e3c2f1d74 | ||
|
|
2bfeee69b9 | ||
|
|
664bcd80b9 | ||
|
|
3c20208eff | ||
|
|
db264e3cc3 | ||
|
|
d396f30467 | ||
|
|
96a9f22d98 | ||
|
|
40025ee2a3 | ||
|
|
3ff63fb365 | ||
|
|
5c7cd37ebd | ||
|
|
298c04b464 | ||
|
|
d95114dd83 | ||
|
|
94dcade8f8 | ||
|
|
fa023ccb2c | ||
|
|
e36f4aa72b | ||
|
|
9261e347cc | ||
|
|
f1ced6df51 | ||
|
|
8b0d7a66ef | ||
|
|
3aec71766d | ||
|
|
16a8b7986b | ||
|
|
617e58d850 | ||
|
|
e33baba0dd | ||
|
|
721f26b821 | ||
|
|
52bb437e41 | ||
|
|
782b1b5bd1 | ||
|
|
0d769bcb78 | ||
|
|
4cd70099ea | ||
|
|
09fc33198a | ||
|
|
4c3b16d5d1 | ||
|
|
d5aacf9a90 | ||
|
|
19e2617a6f | ||
|
|
edd9b71c2c | ||
|
|
5940862d5a | ||
|
|
de6c51e88e | ||
|
|
303dcdb995 | ||
|
|
20938f768b | ||
|
|
955737b2d4 | ||
|
|
263eff9537 | ||
|
|
cae21032ab | ||
|
|
6187091532 | ||
|
|
0d33166ec5 | ||
|
|
87c03c6bd2 | ||
|
|
4c92fd2e83 | ||
|
|
e3d17b3c07 | ||
|
|
810c10baa1 | ||
|
|
57f7e3c62d | ||
|
|
0d0e282912 | ||
|
|
85e8f26b82 | ||
|
|
b57fecfddd | ||
|
|
8c97e7efb6 | ||
|
|
cc162f6a0a | ||
|
|
cf45ed786e | ||
|
|
574b2a7393 | ||
|
|
9f02ff537c | ||
|
|
0436ec0e7a | ||
|
|
11f12195af | ||
|
|
a646a8cf98 | ||
|
|
63f41d3821 | ||
|
|
c5229f3926 | ||
|
|
96f4f796fb | ||
|
|
70cab344c4 | ||
|
|
a7ba57dc17 | ||
|
|
83548824c2 | ||
|
|
354dbbd880 | ||
|
|
23edc49509 | ||
|
|
48254c3f2c | ||
|
|
2cab48704c | ||
|
|
64d4f31d78 | ||
|
|
0c9ff24041 | ||
|
|
3ff8279e80 | ||
|
|
cb6e477dfe | ||
|
|
edfd93518e | ||
|
|
89807d6a82 | ||
|
|
49dea4913b | ||
|
|
dec2cae0a7 | ||
|
|
cf6cd07396 | ||
|
|
975b9c9ab0 | ||
|
|
8ac73bdbe4 | ||
|
|
877f440f7b | ||
|
|
d13bdc3824 | ||
|
|
744daf9418 | ||
|
|
bf475e1990 | ||
|
|
203f3d779a | ||
|
|
4230c4894d | ||
|
|
6bb266693f | ||
|
|
5d53c32701 | ||
|
|
2e7e561c1d | ||
|
|
d8515fd41c | ||
|
|
694c47b261 | ||
|
|
77dea16ac8 | ||
|
|
6ae27bed01 | ||
|
|
da1973a038 | ||
|
|
be24916a7f | ||
|
|
2cb99ebbd0 | ||
|
|
91ee320bfa | ||
|
|
8fb754bcd0 | ||
|
|
b7b72db9ad | ||
|
|
634415ca17 | ||
|
|
2f7ae819ac | ||
|
|
0a477f8731 | ||
|
|
a755f82549 | ||
|
|
7f4173ae7c | ||
|
|
fb47597b09 | ||
|
|
450b233cc2 | ||
|
|
b7d7674f1e | ||
|
|
0e832c2c97 | ||
|
|
8e4aa7bf18 | ||
|
|
a42dfa629e | ||
|
|
b970dfddaf | ||
|
|
46a4ea8276 | ||
|
|
3f2f4a94aa | ||
|
|
f930e0c76e | ||
|
|
0fdbb3322b | ||
|
|
e9c8999ede | ||
|
|
73cbd709f9 | ||
|
|
9dce3c095b | ||
|
|
e5a2e17a9c | ||
|
|
0ec589fac3 | ||
|
|
36bb63e084 | ||
|
|
91d6aafb48 | ||
|
|
c8868a9d83 | ||
|
|
09f572fbc0 | ||
|
|
58e6d097d8 | ||
|
|
15bf934de5 | ||
|
|
cdfee16818 | ||
|
|
bcb668de18 | ||
|
|
fac7e79277 | ||
|
|
a6c8b75904 | ||
|
|
25cb05bda9 | ||
|
|
6fa6d38549 | ||
|
|
883c052378 | ||
|
|
61f317c24c | ||
|
|
64f08d4ff2 | ||
|
|
e738e43358 | ||
|
|
f6f6217a98 | ||
|
|
31db8709bf | ||
|
|
5080cbf9fd | ||
|
|
9880124196 | ||
|
|
9c7b509b2a | ||
|
|
e0dccdd398 | ||
|
|
5d583bdf6c | ||
|
|
1e501364d5 | ||
|
|
74278def2e | ||
|
|
e375a149e1 | ||
|
|
2bfc0e97f6 | ||
|
|
ac45505528 | ||
|
|
7404061141 | ||
|
|
46c329d6f6 | ||
|
|
1818e4c2b4 | ||
|
|
e7bd17373d | ||
|
|
c58e74062f | ||
|
|
6d210f2090 | ||
|
|
af7d5a63b2 | ||
|
|
e41acb6364 | ||
|
|
bdf7f13954 | ||
|
|
0f56a4b443 | ||
|
|
1b5284b13f | ||
|
|
d1e4a464cd | ||
|
|
ff059017c0 | ||
|
|
f22ba4bd60 | ||
|
|
1db772673e | ||
|
|
75313f2baa | ||
|
|
090eb8e25f | ||
|
|
a9793f58a1 | ||
|
|
7177fd24f8 | ||
|
|
1e501f6c40 | ||
|
|
2629a3802c | ||
|
|
51ce91174b | ||
|
|
107d0c421a | ||
|
|
18b0b23992 | ||
|
|
d1b29d1342 | ||
|
|
2def60c5f3 | ||
|
|
19a17d4623 | ||
|
|
845817aadf | ||
|
|
3233a68fbb | ||
|
|
cf074e5ddd | ||
|
|
002c755248 | ||
|
|
d627cec608 | ||
|
|
1315224cbb | ||
|
|
7760b9ff4d | ||
|
|
28559564b2 | ||
|
|
fa880d20ad | ||
|
|
ae7d31af1c | ||
|
|
9d303bf29b | ||
|
|
5f1688f271 | ||
|
|
1d4c9ed90c | ||
|
|
d48352fb5d | ||
|
|
6d6536acb2 | ||
|
|
b6f94d81ea | ||
|
|
8477a69283 | ||
|
|
d58cb3ec7e | ||
|
|
8a370aedac | ||
|
|
24ca0e9c0b | ||
|
|
e1dd521e49 | ||
|
|
1255733945 | ||
|
|
3201a67f61 | ||
|
|
d0ff690d68 | ||
|
|
fb640d0a3d | ||
|
|
38f9ef31dc | ||
|
|
a8276b2680 | ||
|
|
ececca6cde | ||
|
|
8bbb4b56ee | ||
|
|
539a1641c6 | ||
|
|
1b0635aba3 | ||
|
|
429491f531 | ||
|
|
e9c0cdd389 | ||
|
|
0cae023b24 | ||
|
|
8ee239e921 | ||
|
|
8bb56eeeea | ||
|
|
fa9e259fd9 | ||
|
|
f3bdae76de | ||
|
|
03879ff054 | ||
|
|
c8398a9b87 | ||
|
|
b8972bd69d | ||
|
|
0ae937a798 | ||
|
|
4459bef203 | ||
|
|
e07237f640 | ||
|
|
8c5a994424 | ||
|
|
2eb25b256b | ||
|
|
f3bc19a989 | ||
|
|
7a8fef3173 | ||
|
|
7465e7e42d | ||
|
|
5e73a67d44 | ||
|
|
2316dc2b9a | ||
|
|
a2d7797cee | ||
|
|
fd050249af | ||
|
|
7bcd2830dd | ||
|
|
47462a125b | ||
|
|
7caf9830b0 | ||
|
|
2bc0c46f98 | ||
|
|
3318832e9d | ||
|
|
e7d2084568 | ||
|
|
c2d3cb4c63 | ||
|
|
c48dd4400f | ||
|
|
e38cafe986 | ||
|
|
85ca019d96 | ||
|
|
4a5ba28a87 | ||
|
|
82156fdbf0 | ||
|
|
6114090418 | ||
|
|
3099b31276 | ||
|
|
f17f86513e | ||
|
|
90f794c6c3 | ||
|
|
66ca2cfddd | ||
|
|
269dd2c6a7 | ||
|
|
e7998f59aa | ||
|
|
9fb556eef0 | ||
|
|
e781ab63db | ||
|
|
3e76968220 | ||
|
|
2812c24c16 | ||
|
|
d77ab8e255 | ||
|
|
4b3cd7316c | ||
|
|
6dae56384a | ||
|
|
2b2dfae83e | ||
|
|
6c10dbeae9 | ||
|
|
9173202b84 | ||
|
|
8870bb4653 | ||
|
|
7a0e7779fe | ||
|
|
a048ffc9b0 | ||
|
|
4587915b2a | ||
|
|
da665ddc25 | ||
|
|
5add979d91 | ||
|
|
20afe8bd14 | ||
|
|
940b606a07 | ||
|
|
9505053704 | ||
|
|
2c9ca78281 | ||
|
|
63719a8ac3 | ||
|
|
8fab62482a | ||
|
|
d6e9c2706f | ||
|
|
f7f2e53a0a | ||
|
|
9cdffeeb3f | ||
|
|
fbb6edd298 | ||
|
|
5eb6bdced4 | ||
|
|
5633b4d39d | ||
|
|
4435c6e98e | ||
|
|
2ebd2eac88 | ||
|
|
b78b292f0c | ||
|
|
efbd6fb8bb | ||
|
|
680079be39 | ||
|
|
e4fc8d2ebe | ||
|
|
f52354a889 | ||
|
|
59f898b7a7 | ||
|
|
8f4a2124a9 | ||
|
|
481888294d | ||
|
|
d1e440a4a1 | ||
|
|
81bdc8fdf6 | ||
|
|
e048d87fc9 | ||
|
|
e26cde0927 | ||
|
|
20108c6b90 | ||
|
|
9195ef745a | ||
|
|
d0459c530d | ||
|
|
f160785c5c | ||
|
|
5c0a57185c | ||
|
|
43479d9e9d | ||
|
|
c0da50d2b2 | ||
|
|
c24883a1c0 | ||
|
|
1b77ee6248 | ||
|
|
bf4b3b6bd9 | ||
|
|
efbeddead3 | ||
|
|
3cfeb1624a | ||
|
|
b95dc034ca | ||
|
|
86a7dbe66e | ||
|
|
b43a7a92cd | ||
|
|
6563d31710 | ||
|
|
cf89ba9eff | ||
|
|
9b01272832 | ||
|
|
58525c94d5 | ||
|
|
621bd0cda9 | ||
|
|
1610f770d7 | ||
|
|
0fc871d2f0 | ||
|
|
1ad6143061 | ||
|
|
92da3cd848 | ||
|
|
6212bcb191 | ||
|
|
d69abbd3f0 | ||
|
|
1d00a8823e | ||
|
|
5d6e1011df | ||
|
|
f5bdb44443 | ||
|
|
7efc1c2b49 | ||
|
|
132e3b74bd | ||
|
|
bdbf4ba40e | ||
|
|
acb6e97e6a | ||
|
|
445d72b8b5 | ||
|
|
92c5e11b40 | ||
|
|
0dd046c16c | ||
|
|
305168ca3e | ||
|
|
b72f6163dc | ||
|
|
33d4fdabfa | ||
|
|
cafcf657a4 | ||
|
|
101067de12 | ||
|
|
7360db05b4 | ||
|
|
c1c05c67ea | ||
|
|
399a76e67b | ||
|
|
765ac263db | ||
|
|
a4e4d7dfcd | ||
|
|
73f9c2867d | ||
|
|
9c86d50916 | ||
|
|
1d14c75f55 | ||
|
|
99709cc3f1 | ||
|
|
5bc880b988 | ||
|
|
958759f44b | ||
|
|
f34294fa0c | ||
|
|
99cbe98ce8 | ||
|
|
86bf29050e | ||
|
|
04cbc4980d | ||
|
|
8765151c8a | ||
|
|
12b84ac8c1 | ||
|
|
8ec64ac683 | ||
|
|
ed8648a322 | ||
|
|
88641243ab | ||
|
|
40e146aa1e | ||
|
|
f3f9cd9234 | ||
|
|
ebf1b291d0 | ||
|
|
bc7a9cd8fb | ||
|
|
d48502b82a | ||
|
|
479ec54a8d | ||
|
|
49625662a9 | ||
|
|
8b809a079a | ||
|
|
778433cb90 | ||
|
|
411cb8f476 | ||
|
|
63bf4f0dc0 | ||
|
|
80e59a0d5d | ||
|
|
8bbd3d1476 | ||
|
|
e725e4bced | ||
|
|
08d65046f0 | ||
|
|
44b9745000 | ||
|
|
9654fc875b | ||
|
|
0f425e65ec | ||
|
|
199e724291 | ||
|
|
e277f2a63b | ||
|
|
f4db09178a | ||
|
|
86be3cdc2a | ||
|
|
cb64ccc715 | ||
|
|
f66a3c7bc2 | ||
|
|
fe80df3080 | ||
|
|
1932476c13 | ||
|
|
d2c1f79f20 | ||
|
|
8eacae8cf9 | ||
|
|
c8a80fd818 | ||
|
|
b9e8d7140a | ||
|
|
6eff2605d6 | ||
|
|
fd7a3ea4a4 | ||
|
|
8d3eeb36d7 | ||
|
|
8e0548e180 | ||
|
|
a517bb4b1e | ||
|
|
9dcefb23a1 | ||
|
|
d9da74bc06 | ||
|
|
5e19323ed9 | ||
|
|
611c1dd96e | ||
|
|
d800609c62 | ||
|
|
c78c9cd10d | ||
|
|
e76394f36c | ||
|
|
080e09557d | ||
|
|
fca2e6d5a6 | ||
|
|
b45f2b1d6e | ||
|
|
fc2e70ee90 | ||
|
|
b4561e857f | ||
|
|
7023251239 | ||
|
|
e2bd68c901 | ||
|
|
35ced3985a | ||
|
|
3e18700d45 | ||
|
|
f9f49d87c2 | ||
|
|
6863631c26 | ||
|
|
9d939cec48 | ||
|
|
4c77d3f52a | ||
|
|
7be747b921 | ||
|
|
bb20526b64 | ||
|
|
bcbb1b08b2 | ||
|
|
3d98f97c64 | ||
|
|
c349456ef6 | ||
|
|
5a4905924d | ||
|
|
b826035dd5 | ||
|
|
a7cab4d039 | ||
|
|
fc3810f6d1 | ||
|
|
3dc71d82ce | ||
|
|
9c7b38981c | ||
|
|
8b85ac3fd9 | ||
|
|
81e1c4e2fc | ||
|
|
388ae76b52 | ||
|
|
b67d63149d | ||
|
|
28280e8ded | ||
|
|
6b3fbd3425 | ||
|
|
a7ab46375b | ||
|
|
b14d5e26f6 | ||
|
|
9a61dfba0c | ||
|
|
dd86780596 | ||
|
|
154c209e2d | ||
|
|
d1ea5e171f | ||
|
|
a1188d0ed0 | ||
|
|
47d205a646 | ||
|
|
80f772c28a | ||
|
|
f817d9bec1 | ||
|
|
e2effb08a4 | ||
|
|
7fcea295c5 | ||
|
|
cc799437ea | ||
|
|
89d23f37f2 | ||
|
|
b92071ef00 | ||
|
|
47246ae26c | ||
|
|
9c15869c28 | ||
|
|
51e9094f4a | ||
|
|
5e3a6fec33 | ||
|
|
c43fe0268c | ||
|
|
d413095f7e | ||
|
|
1bedf4de06 | ||
|
|
3967a761f4 | ||
|
|
b081350bd9 | ||
|
|
16f1430ba6 | ||
|
|
085ad71157 | ||
|
|
35972ba172 | ||
|
|
3834d3e35c | ||
|
|
8d0a2a2a4e | ||
|
|
11c0339bec | ||
|
|
915dd77783 | ||
|
|
b6bfa6fb79 | ||
|
|
f070197bd7 | ||
|
|
5a7699bb2e | ||
|
|
8628d26f38 | ||
|
|
8411229bd5 | ||
|
|
72b9ebc65d | ||
|
|
3b799ca14c | ||
|
|
0474512e30 | ||
|
|
f0905c6ec3 | ||
|
|
86296ad2cd | ||
|
|
52f5889f77 | ||
|
|
81e0b4f2d1 | ||
|
|
cbecc9b903 | ||
|
|
b8b465af3e | ||
|
|
59b35c6745 | ||
|
|
7032833011 | ||
|
|
f406c78785 | ||
|
|
f326b5837a | ||
|
|
5dd4b3468f | ||
|
|
d4f8e83404 | ||
|
|
7b8b007cd9 | ||
|
|
3547d26587 | ||
|
|
7e62c2eb6d | ||
|
|
56401e1e5f | ||
|
|
860db2d508 | ||
|
|
4b8874975c | ||
|
|
bd6b6f6622 | ||
|
|
4340727e6c | ||
|
|
3ceccade87 | ||
|
|
28ad7df65d | ||
|
|
79a3508579 | ||
|
|
1b840245bd | ||
|
|
6a3828fddd | ||
|
|
91cb6b5065 | ||
|
|
0826a0b555 | ||
|
|
bcbbb98bfe | ||
|
|
66159b38aa | ||
|
|
23d17e4beb | ||
|
|
d97b0e3241 | ||
|
|
eb2533ec4c | ||
|
|
b7b365067f | ||
|
|
86e284e028 | ||
|
|
d9e543b680 | ||
|
|
c773c232d8 | ||
|
|
58ae24336a | ||
|
|
7d3a035ee0 | ||
|
|
e06e75c7e7 | ||
|
|
593e0f43b4 | ||
|
|
008ab0f814 | ||
|
|
3f7e8750d4 | ||
|
|
f1ed3acae5 | ||
|
|
920d21b9d3 | ||
|
|
2fb35d1c28 | ||
|
|
09be85b8dd | ||
|
|
eadc3ccd50 | ||
|
|
255732f0d3 | ||
|
|
53c269c6fd | ||
|
|
675d001633 | ||
|
|
58be922079 | ||
|
|
c84d3a557d | ||
|
|
d577c79632 | ||
|
|
6ad2b01e14 | ||
|
|
fd3a1f3d60 | ||
|
|
87de7069b9 | ||
|
|
6fba62c87a | ||
|
|
f14be22816 | ||
|
|
1df4141196 | ||
|
|
fae45ede08 | ||
|
|
4e0cff2a50 | ||
|
|
9c74423510 | ||
|
|
5976e7ab57 | ||
|
|
a1a22572fb | ||
|
|
c11875b328 | ||
|
|
8ff648e4f9 | ||
|
|
1bac34556f | ||
|
|
0436157b95 | ||
|
|
ae0db349c1 | ||
|
|
08411970d5 | ||
|
|
dc724e0c8b | ||
|
|
0a5d1ec706 | ||
|
|
58250eff2b | ||
|
|
11a4efc505 | ||
|
|
7537b35fb8 | ||
|
|
33cc74eeeb | ||
|
|
f021acee49 | ||
|
|
abe694ca95 | ||
|
|
b286f201a8 | ||
|
|
bd93a12e85 | ||
|
|
92769650fa | ||
|
|
dc4fe5c6d7 | ||
|
|
566bda51f2 | ||
|
|
f63757ec35 | ||
|
|
7a0ed06909 | ||
|
|
9934fe76be | ||
|
|
a8aad21001 | ||
|
|
d055bf91cc | ||
|
|
0e1b1a011d | ||
|
|
eab3c2895c | ||
|
|
163da6a484 | ||
|
|
324916d11a | ||
|
|
3ccb0655c1 | ||
|
|
e04398e397 | ||
|
|
231ea2a3bb | ||
|
|
b99d88c6a1 | ||
|
|
189d72d5fd | ||
|
|
a7aab0c23e | ||
|
|
a69bee4762 | ||
|
|
9acd33094d | ||
|
|
8e7aad2075 | ||
|
|
ce5879fa14 | ||
|
|
7b7507d6e1 | ||
|
|
14823decf3 | ||
|
|
673fb82e65 | ||
|
|
181cf24bc0 | ||
|
|
89f2602880 | ||
|
|
db9b1dbcd9 | ||
|
|
e881c4bcab | ||
|
|
670ad51ade | ||
|
|
eb6fc7d32a | ||
|
|
ed1a390583 | ||
|
|
809e1857c5 | ||
|
|
7c38af48b9 | ||
|
|
60ad3eb970 | ||
|
|
a7685b3a6b | ||
|
|
8f1fddc816 | ||
|
|
1bf996fa5c | ||
|
|
248ae880b6 | ||
|
|
2d2fa82d17 | ||
|
|
c94678957f | ||
|
|
16f38a699f | ||
|
|
a6c2c24479 | ||
|
|
b8c9926c0a | ||
|
|
df374b5222 | ||
|
|
5ea1eb78f5 | ||
|
|
5d2c0fd9ba | ||
|
|
0803753fea | ||
|
|
2c2f1efdcd | ||
|
|
b323e1707d | ||
|
|
09104e9930 | ||
|
|
5fa1702ca6 | ||
|
|
17b598d30c | ||
|
|
53be8894e4 | ||
|
|
c3deacd562 | ||
|
|
8ab3fe81d8 | ||
|
|
2f0a33d8a3 | ||
|
|
05d0d131a7 | ||
|
|
c140629995 | ||
|
|
7d106a65ca | ||
|
|
0179f6a830 | ||
|
|
830afe85dc | ||
|
|
8bf39420b4 | ||
|
|
71d08b3e29 | ||
|
|
06ffa33485 | ||
|
|
874e05975b | ||
|
|
f5d30d521c | ||
|
|
e047922be0 | ||
|
|
83ab8a79cc | ||
|
|
350cf045d8 | ||
|
|
68a0ea15b4 | ||
|
|
2b4f5e68d1 | ||
|
|
055f417278 | ||
|
|
70029bc348 | ||
|
|
cf57433bbd | ||
|
|
1ac6e794cb | ||
|
|
a853427427 | ||
|
|
50e989e263 | ||
|
|
10e6ed9341 | ||
|
|
38c84acae5 | ||
|
|
29f46c2bee | ||
|
|
39c10a2b6e | ||
|
|
b913348d5f | ||
|
|
2b14cb566f | ||
|
|
b0df5223be | ||
|
|
ed7cd1e859 | ||
|
|
f125d9115b | ||
|
|
a9d5f12fec | ||
|
|
7f32e5dc35 | ||
|
|
c3111ab34f | ||
|
|
9339774af2 | ||
|
|
b0d21deda9 | ||
|
|
fab6f0e65b | ||
|
|
b6c33fd544 | ||
|
|
fb4b345800 | ||
|
|
af9c2a07ae | ||
|
|
ab180fc648 | ||
|
|
682f8c43b5 | ||
|
|
f693213567 | ||
|
|
9165d6bab9 | ||
|
|
2975fe1a7b | ||
|
|
de691a498d | ||
|
|
2e6e742c3c | ||
|
|
e9bd0f772b | ||
|
|
77f785076f | ||
|
|
94278f7202 | ||
|
|
a0d8d704df | ||
|
|
f6861ec96f | ||
|
|
f733b05302 | ||
|
|
6fa73386cb | ||
|
|
5ca01bb9e4 | ||
|
|
1ca59daca9 | ||
|
|
594c4d79a5 | ||
|
|
1f16b958b1 | ||
|
|
4c0d13df9b | ||
|
|
b2c6528baf | ||
|
|
ea17820432 | ||
|
|
1257b049bc | ||
|
|
b969813548 | ||
|
|
10677ece81 | ||
|
|
d570746e45 | ||
|
|
4fcd9d147d | ||
|
|
9c54ae3387 | ||
|
|
24114fee74 | ||
|
|
220ee33f2b | ||
|
|
4118cc02c1 | ||
|
|
32d77eeb04 | ||
|
|
032f232626 | ||
|
|
4d318be195 | ||
|
|
6b45f9aba2 | ||
|
|
1e10d02fec | ||
|
|
51290d8457 | ||
|
|
582f4f834e | ||
|
|
e87d98b0dd | ||
|
|
383496e65e | ||
|
|
4519c1f43c | ||
|
|
a616f65471 | ||
|
|
1f78ed189a | ||
|
|
7dde358adc | ||
|
|
27b83249c9 | ||
|
|
56aa074538 | ||
|
|
9d90e7de03 | ||
|
|
7d4d9c526a | ||
|
|
fe6856b059 | ||
|
|
a54fbf2ca6 | ||
|
|
d8024aebe5 | ||
|
|
8652bd22f1 | ||
|
|
f15a9ca301 | ||
|
|
65ced034b8 | ||
|
|
bec30224ff | ||
|
|
0428106da3 | ||
|
|
73e7442456 | ||
|
|
26de1bba83 | ||
|
|
e0690782b8 | ||
|
|
8fff4f61e5 | ||
|
|
10defdd06a | ||
|
|
485139c15c | ||
|
|
b605ebb609 | ||
|
|
aecfcd4e59 | ||
|
|
942d46196f | ||
|
|
78be2eca7c | ||
|
|
1fa2b9841d | ||
|
|
9fbd0822aa | ||
|
|
e323cf3ff3 | ||
|
|
8ceabd4df3 | ||
|
|
a8776b107b | ||
|
|
096b533982 | ||
|
|
dae503afaa | ||
|
|
b39eab7f94 | ||
|
|
e5a66240c0 | ||
|
|
e0ef13ddeb | ||
|
|
855f90fa6f | ||
|
|
614db89ae3 | ||
|
|
1358b94163 | ||
|
|
350e02d40d | ||
|
|
0b26ba3fc8 | ||
|
|
3a0a78731b | ||
|
|
6be16ed24b | ||
|
|
b555942428 | ||
|
|
b2dca40d81 | ||
|
|
15870bbd01 | ||
|
|
10d33b3473 | ||
|
|
ac25992bc7 | ||
|
|
30783c442d | ||
|
|
a50a8003a0 | ||
|
|
315bdae00a | ||
|
|
2ddfd26f1b | ||
|
|
f3ed5df611 | ||
|
|
b4e44234bc | ||
|
|
4ca2a3cf3c | ||
|
|
33d2fc2f64 | ||
|
|
27a95f51aa | ||
|
|
a78d6a9bb1 | ||
|
|
567f9a5809 | ||
|
|
3a421c724f | ||
|
|
34dd81c03a | ||
|
|
b3f502cdb9 | ||
|
|
587dfd44a4 | ||
|
|
52767c1ba0 | ||
|
|
014b5c59d8 | ||
|
|
fad7a336a1 | ||
|
|
ffbc0baf72 | ||
|
|
345f12196c | ||
|
|
5769b68bc0 | ||
|
|
4e2743abd9 | ||
|
|
be2d40a58a | ||
|
|
81549898c0 | ||
|
|
0baedd1851 | ||
|
|
6b559c2fbc | ||
|
|
986986064e | ||
|
|
4654c1d016 | ||
|
|
163e8369b0 | ||
|
|
5cc9c5dfa8 | ||
|
|
fbd90643cb | ||
|
|
30e2f2d76f | ||
|
|
11c60089a8 | ||
|
|
abb893e6e4 | ||
|
|
4511c1976d | ||
|
|
4240d50496 | ||
|
|
6240b0a278 | ||
|
|
e37afbe0b8 | ||
|
|
40cf7fcbd2 | ||
|
|
cc28492d31 | ||
|
|
bc0550c262 | ||
|
|
b83b782dc4 | ||
|
|
16a348475c | ||
|
|
709185a264 | ||
|
|
9cb1a06b6c | ||
|
|
be27283ef6 | ||
|
|
b924bfad68 | ||
|
|
192b9a571c | ||
|
|
6ec6cb4e95 | ||
|
|
36a0e46c39 | ||
|
|
dfb1b1468c | ||
|
|
3c91e41614 | ||
|
|
7e8a800f29 | ||
|
|
2334762b03 | ||
|
|
3fc088f8c7 | ||
|
|
a9bbd26f1d | ||
|
|
6e99d5762a | ||
|
|
15b1c6656f | ||
|
|
d412794205 | ||
|
|
0a899a1448 | ||
|
|
7a34302e95 | ||
|
|
27783821af | ||
|
|
b374af6ebd | ||
|
|
16f1131a4d | ||
|
|
d5f071afb5 | ||
|
|
14b4f038c0 | ||
|
|
bcac2a0710 | ||
|
|
1a6d92847f | ||
|
|
6a16fd4a1a | ||
|
|
44731e308c | ||
|
|
4763b624a6 | ||
|
|
6609b3ce37 | ||
|
|
7e182627d9 | ||
|
|
5777f5d386 | ||
|
|
5dbe81a1d3 | ||
|
|
4cf096a4a9 | ||
|
|
18e6c97c48 | ||
|
|
97afd99a18 | ||
|
|
23f13e9754 | ||
|
|
2e02ecbccc | ||
|
|
e4f49a8753 | ||
|
|
51d3045de2 | ||
|
|
76048b23e8 | ||
|
|
f20756fb10 | ||
|
|
17b2d7ca77 | ||
|
|
40f796288a | ||
|
|
2f546d0a3c | ||
|
|
18c782ab26 | ||
|
|
33cee6c7f6 | ||
|
|
a2e51e7b49 | ||
|
|
bd19aa0ed3 | ||
|
|
8f4c56f334 | ||
|
|
1dcc38b233 | ||
|
|
fff79f1867 | ||
|
|
3f17c357d9 | ||
|
|
9938a17f92 | ||
|
|
9746f4314a | ||
|
|
0238451fc0 | ||
|
|
2098aee7d6 | ||
|
|
fb588f6a56 | ||
|
|
896c7a23cd | ||
|
|
1463c5b9ac | ||
|
|
c6270b2ed5 | ||
|
|
ab3176af34 | ||
|
|
5aa535c329 | ||
|
|
133b1886fc | ||
|
|
66295fa4a6 | ||
|
|
e54c44eeab | ||
|
|
a7aaa39863 | ||
|
|
ea6abd740f | ||
|
|
e1a0bfdffe | ||
|
|
3f3343cd3e | ||
|
|
4059eabd58 | ||
|
|
6b46102661 | ||
|
|
141a273a8b | ||
|
|
2fffb1dcd0 | ||
|
|
e698e4e533 | ||
|
|
b7546397f0 | ||
|
|
0311677258 | ||
|
|
88fb59d91b | ||
|
|
a1d9f6c5dc | ||
|
|
c579c5e967 | ||
|
|
c9c194053d | ||
|
|
f20a11ed25 | ||
|
|
76a353c9e5 | ||
|
|
392f04d586 | ||
|
|
94de6cf59c | ||
|
|
8af2804a5d | ||
|
|
054479754c | ||
|
|
5bafcf6525 | ||
|
|
306c51c669 | ||
|
|
27bfd4e526 | ||
|
|
ca227c8698 | ||
|
|
32f9036447 | ||
|
|
190ef07981 | ||
|
|
82597f0ec0 | ||
|
|
8499d21158 | ||
|
|
c9154514c4 | ||
|
|
0d5095fc65 | ||
|
|
034caf70b2 | ||
|
|
e565cf6048 | ||
|
|
59f197aec1 | ||
|
|
a0e5beb0fb | ||
|
|
c1e90619bd | ||
|
|
fec09bf15d | ||
|
|
a0d7ede350 | ||
|
|
b26afec81f | ||
|
|
8f7c4f7d2e | ||
|
|
0416006a30 | ||
|
|
7f9134fb2d | ||
|
|
91e274546c | ||
|
|
69f8595256 | ||
|
|
930087f2f6 | ||
|
|
9f9f7664b5 | ||
|
|
72528252e3 | ||
|
|
e4bd63f9c0 | ||
|
|
9accfed4e7 | ||
|
|
f1e21efe63 | ||
|
|
b05641ce40 | ||
|
|
fec040e754 | ||
|
|
34a9da136f | ||
|
|
c43fda4c1a | ||
|
|
7de81fcc53 | ||
|
|
9d46608efa | ||
|
|
80b8b72cb8 | ||
|
|
9787c5f4c8 | ||
|
|
d5f6429de8 | ||
|
|
df827a983a | ||
|
|
29f3683901 | ||
|
|
c7932289e7 | ||
|
|
7a0b07c719 | ||
|
|
4d402db521 | ||
|
|
7109903e61 | ||
|
|
3092fc4035 | ||
|
|
f5bc4b5f95 | ||
|
|
69759a5990 | ||
|
|
453fe2a345 | ||
|
|
ff18735cb2 | ||
|
|
030dfb04e0 | ||
|
|
06e4874c99 | ||
|
|
0d8a0fdc30 | ||
|
|
53365f74a7 | ||
|
|
0368181998 | ||
|
|
6101f45ef9 | ||
|
|
bf96b45238 | ||
|
|
98d7c0f4f7 | ||
|
|
f2017cb020 | ||
|
|
f889ac45b8 | ||
|
|
eccde5e9de | ||
|
|
ce7d243c7e | ||
|
|
6c4d6609ad | ||
|
|
db710571fd | ||
|
|
574dd17882 | ||
|
|
422f7c112c | ||
|
|
e974356f32 | ||
|
|
b19ad2fb53 | ||
|
|
126d7701b0 | ||
|
|
ecf17d1653 | ||
|
|
7447661e9b | ||
|
|
7e5edcfd33 | ||
|
|
39d60b715a | ||
|
|
d497a201ca | ||
|
|
54537cdfb3 | ||
|
|
bc737bd61a | ||
|
|
a5c1d95500 | ||
|
|
c1f49e1684 | ||
|
|
9f66931e16 | ||
|
|
6c6b8bd5cc | ||
|
|
04e24906be | ||
|
|
974c1b2d42 | ||
|
|
bca9bea1c1 | ||
|
|
bd3f9ecabe | ||
|
|
c047270c02 | ||
|
|
97f18fac3a | ||
|
|
c71d2e2087 | ||
|
|
59185202c6 | ||
|
|
bee83e84f6 | ||
|
|
82e02ea5fc | ||
|
|
a95c26a06a | ||
|
|
0b0a17ae9d | ||
|
|
30f51acbc8 | ||
|
|
e0898585a1 | ||
|
|
62bdc9fecc | ||
|
|
e73277c7e8 | ||
|
|
8d29e47f54 | ||
|
|
2db772b9ea | ||
|
|
7b81316508 | ||
|
|
05358deeca | ||
|
|
9f610f3a9e | ||
|
|
dbfd06730c | ||
|
|
5b025168c7 | ||
|
|
46124a49b2 | ||
|
|
608cc3b85c | ||
|
|
6afe044b51 | ||
|
|
15aad84dc5 | ||
|
|
f7e1d82d40 | ||
|
|
339b1944e7 | ||
|
|
85367c3a47 | ||
|
|
607d65fbce | ||
|
|
9f0ee2a388 | ||
|
|
1fc0b47fdf | ||
|
|
6418b2439b | ||
|
|
06d5556dfa | ||
|
|
fb8e402ad2 | ||
|
|
c24044635b | ||
|
|
67ba388efb | ||
|
|
e41604227c | ||
|
|
8a609c32fd | ||
|
|
96db61ffb8 | ||
|
|
c153bd8b2f | ||
|
|
383003653f | ||
|
|
fc383f199e | ||
|
|
2c566d02fe | ||
|
|
a8f1d167f6 | ||
|
|
261b4c23c7 | ||
|
|
dcdc352371 | ||
|
|
be514c856c | ||
|
|
128eb31d90 | ||
|
|
747b028412 | ||
|
|
7fe37d8a05 | ||
|
|
f10c27b8cb | ||
|
|
60427f63d1 | ||
|
|
178b47e6af | ||
|
|
3a70ed9ebe | ||
|
|
89abf7bf4d | ||
|
|
cfe9e5aa6c | ||
|
|
4c24ed9464 | ||
|
|
11208ebbf1 | ||
|
|
774ce35571 | ||
|
|
dbee18b552 | ||
|
|
31d9ea4a3e | ||
|
|
3b68efdc6a | ||
|
|
2be689b7e2 | ||
|
|
2db5806991 | ||
|
|
220bc3f0e3 | ||
|
|
48a6c984b8 | ||
|
|
dc016bf521 | ||
|
|
ff43d2365f | ||
|
|
ed63cbd6ee | ||
|
|
5f432ac8f5 | ||
|
|
c7224074d6 | ||
|
|
eed30fea75 | ||
|
|
5625bd0617 | ||
|
|
5ef5d25b15 | ||
|
|
0f15ad7b9b | ||
|
|
f11d00fa41 | ||
|
|
61ebb401b7 | ||
|
|
5c5a3ecf1b | ||
|
|
0197004f78 | ||
|
|
2c28da8e05 | ||
|
|
c7fa5fa42c | ||
|
|
7ba71e30fb | ||
|
|
7cb0952474 | ||
|
|
a8ae232fa9 | ||
|
|
5b251628e9 | ||
|
|
b9a324c0da | ||
|
|
5b95419ca5 | ||
|
|
ecbccea703 | ||
|
|
c240ab6ecf | ||
|
|
6882c0870e | ||
|
|
b0eeaf4f40 | ||
|
|
c6ed6fadc2 | ||
|
|
e462474e1d | ||
|
|
6b77d52b1f | ||
|
|
9b9c5355e4 | ||
|
|
d890b4cc0a | ||
|
|
2c74e6fa77 | ||
|
|
c0384f221e | ||
|
|
8e60dc7526 | ||
|
|
8900ab4d9b | ||
|
|
fb043a6e4e | ||
|
|
7f8b271465 | ||
|
|
fdae235858 | ||
|
|
1deb710f26 | ||
|
|
ec6504b39c | ||
|
|
dd85e4d707 | ||
|
|
fa64a84311 | ||
|
|
e0f06eae43 | ||
|
|
0f206ee814 | ||
|
|
cc0f378d54 | ||
|
|
e33c9cba7c | ||
|
|
989e9f8ead | ||
|
|
8f097af4ec | ||
|
|
c40dbb19ab | ||
|
|
ffaf6e66e3 | ||
|
|
74c730174f | ||
|
|
c82a8dd14c | ||
|
|
f8253af561 | ||
|
|
ed370ff0e6 | ||
|
|
ee0f0393cf | ||
|
|
db2fe38b55 | ||
|
|
d631d5f9f2 | ||
|
|
4f29fa9906 | ||
|
|
5b72fda140 | ||
|
|
f81ccbb3df | ||
|
|
9fd0f67678 | ||
|
|
15d50aca64 | ||
|
|
7234d1d9c7 | ||
|
|
9796a9b20c | ||
|
|
016dd82050 | ||
|
|
b95779be21 | ||
|
|
10171468d9 | ||
|
|
bf597a6dd1 | ||
|
|
45dad8bab9 | ||
|
|
64ccbf18c0 | ||
|
|
9dc1d94a0c | ||
|
|
7824e1f6a6 | ||
|
|
2469a6aecb | ||
|
|
8f0afda028 | ||
|
|
35e22b6b32 | ||
|
|
323f82a7e0 | ||
|
|
8534bf1f00 | ||
|
|
eb4f27405b | ||
|
|
2d3b70271c | ||
|
|
ad1b6017cd | ||
|
|
05467d5a52 | ||
|
|
ae5e94808e | ||
|
|
d7ffcfcf97 | ||
|
|
0cb58b0259 | ||
|
|
31b2051e21 | ||
|
|
eb0bdc2c3e | ||
|
|
6583c741cd | ||
|
|
2d9295643e | ||
|
|
ee86e2c6d7 | ||
|
|
02a63fadc3 | ||
|
|
f3711edcf1 | ||
|
|
22d07ba4e4 | ||
|
|
f6abca506e | ||
|
|
b5424acdb9 | ||
|
|
47c7f3d995 | ||
|
|
0014ffa829 | ||
|
|
c03943f394 | ||
|
|
deb1e8d20e | ||
|
|
174964a7bc | ||
|
|
9c568178fb | ||
|
|
dbb7d7e26c | ||
|
|
ade2340971 | ||
|
|
4d77550cf0 | ||
|
|
c683454e7e | ||
|
|
f133fd326b | ||
|
|
1faa66f005 | ||
|
|
8773f3158f | ||
|
|
7e37c39485 | ||
|
|
14c17cafa1 | ||
|
|
8696a7fd13 | ||
|
|
bb4b8c57b9 | ||
|
|
d63cfc3f0f | ||
|
|
f377f44dae | ||
|
|
0b1bb1ac3a | ||
|
|
f208e52a76 | ||
|
|
b091529a3c | ||
|
|
b323a3cbff | ||
|
|
b59623ef43 | ||
|
|
9c163950da | ||
|
|
d357bbd375 | ||
|
|
f542a3d26b | ||
|
|
59a4ff482a | ||
|
|
40ca5b04f4 | ||
|
|
411e5b88c9 | ||
|
|
b4c299bad0 | ||
|
|
ab4bdc913f | ||
|
|
1fe248a51b | ||
|
|
2559b9d017 | ||
|
|
4db43567e8 | ||
|
|
5333842a1d | ||
|
|
98c3806b15 | ||
|
|
b6afc225c8 | ||
|
|
ad30dc1e20 | ||
|
|
ff51983e15 | ||
|
|
fdf01663d1 | ||
|
|
4b94288301 | ||
|
|
4bf99ade15 | ||
|
|
e3e166d8cf | ||
|
|
3da3999612 | ||
|
|
d50116b8ac | ||
|
|
75ed53320e | ||
|
|
17b786ae73 | ||
|
|
dfd42a43c3 | ||
|
|
f7b8dd63f0 | ||
|
|
a8abf124c8 | ||
|
|
176ccefcd8 | ||
|
|
cbd2ffd031 | ||
|
|
0b534d2adc | ||
|
|
526a20bd16 | ||
|
|
51094b1b08 | ||
|
|
f1ac2033ab | ||
|
|
a1b8d815f5 | ||
|
|
8b756bd98e | ||
|
|
46047c58d0 | ||
|
|
374c761e77 | ||
|
|
6c7b26e13f | ||
|
|
b51b108045 | ||
|
|
86e8c89488 | ||
|
|
41c3b34b1f | ||
|
|
47f48f5d85 | ||
|
|
e15e2ef7a0 | ||
|
|
d0c8b279da | ||
|
|
612d83b51d | ||
|
|
9c30efeb7e | ||
|
|
39fa4cc107 | ||
|
|
b09c122373 | ||
|
|
3348243b7b | ||
|
|
b46b65ed37 | ||
|
|
18e4088fad | ||
|
|
5fd6cd64f9 | ||
|
|
3d24bbfbe4 | ||
|
|
1775612512 | ||
|
|
0d2d967cc7 | ||
|
|
a5e52a1fd4 | ||
|
|
291a93bafa | ||
|
|
c4737bea17 | ||
|
|
45dad7ba1b | ||
|
|
db7c9da871 | ||
|
|
bc92621ade | ||
|
|
fd8e559c3a | ||
|
|
222e11d4ae | ||
|
|
7d682f0acb | ||
|
|
8364b6b0b1 | ||
|
|
9f6b517671 | ||
|
|
7ac40e5521 | ||
|
|
36066dd3ee | ||
|
|
636aa83ed3 | ||
|
|
33d152b6cc | ||
|
|
51c4fec0d5 | ||
|
|
0017486dca | ||
|
|
edc70f4aaf | ||
|
|
756926ff00 | ||
|
|
cb160dd531 | ||
|
|
77334ccb44 | ||
|
|
796db21295 | ||
|
|
535d7b681b | ||
|
|
7db2897ded | ||
|
|
960e038886 | ||
|
|
ea14422ff1 | ||
|
|
38d05d17e5 | ||
|
|
db9bd5267f | ||
|
|
ab3b773bbe | ||
|
|
0bc4ee60e0 | ||
|
|
a3ef0e1cdd | ||
|
|
679bacf0b5 | ||
|
|
02e3952f3b | ||
|
|
64b7e89c0c | ||
|
|
bee4c5571a | ||
|
|
96929dd1e8 | ||
|
|
53e06b2507 | ||
|
|
b80d4bebf3 | ||
|
|
55bec9b658 | ||
|
|
2a63b0f110 | ||
|
|
07b88cffce | ||
|
|
58c8451f36 | ||
|
|
3047121c63 | ||
|
|
7079f8ff1f | ||
|
|
2c3b9f3570 | ||
|
|
fad2428f47 | ||
|
|
c3d3110f6a | ||
|
|
79ec00276c | ||
|
|
9c117d345f | ||
|
|
46cc1c65a4 | ||
|
|
71d9fe7818 | ||
|
|
4ccabf93db | ||
|
|
6612a34939 | ||
|
|
e5b4225f7c | ||
|
|
b2ca35ddbc | ||
|
|
76ab842d9b | ||
|
|
78653a33aa | ||
|
|
24dc1ed715 | ||
|
|
682d8dcd21 | ||
|
|
640bb54e73 | ||
|
|
e0977d7686 | ||
|
|
112ab398db | ||
|
|
af93fcfa05 | ||
|
|
62d231c004 | ||
|
|
49358274d7 | ||
|
|
7b1e379ca9 | ||
|
|
22d7368dfb | ||
|
|
24121bc703 | ||
|
|
9fc87fa767 | ||
|
|
328f82d59a | ||
|
|
78717fc328 | ||
|
|
3b35c3425e | ||
|
|
874ae0354e | ||
|
|
4c6b4764f0 | ||
|
|
59ee8a8647 | ||
|
|
af284305d5 | ||
|
|
d53a4af1a4 | ||
|
|
2e1b928540 | ||
|
|
040ac68679 | ||
|
|
049d71d874 | ||
|
|
bf2c8c8f82 | ||
|
|
ef428960c9 | ||
|
|
992fc9d6e1 | ||
|
|
8639f89f51 | ||
|
|
0424ec307b | ||
|
|
ac5a69af45 | ||
|
|
94e8c80473 | ||
|
|
87f0e62d94 | ||
|
|
46b4070f3f | ||
|
|
2a776f9788 | ||
|
|
f4c7ef9862 | ||
|
|
50e12e9df1 | ||
|
|
b7faebbac8 | ||
|
|
4191fdf147 | ||
|
|
9a4f12be98 | ||
|
|
7ad4258add | ||
|
|
9945c4994c | ||
|
|
5faf9fed7e | ||
|
|
13a9b69b09 | ||
|
|
4975650e00 | ||
|
|
0cc7178546 | ||
|
|
41f24c321d | ||
|
|
4b3fbafdd2 | ||
|
|
7ac40086f5 | ||
|
|
313dfc45f5 | ||
|
|
78a55d7a28 | ||
|
|
bb6ac83698 | ||
|
|
9d0e366880 | ||
|
|
9bff48a0e7 | ||
|
|
60121eb514 | ||
|
|
527ca1da4f | ||
|
|
7689413e42 | ||
|
|
ba7a92b0ce | ||
|
|
4c7d816dd7 | ||
|
|
032f2f260f | ||
|
|
20e98bf6c0 | ||
|
|
5c2266df4b | ||
|
|
67dda51722 | ||
|
|
e4c4bcf36f | ||
|
|
82d8a8b6e2 | ||
|
|
13a10d5aa3 | ||
|
|
9022726446 | ||
|
|
94bfcd23b7 | ||
|
|
526b3b0716 | ||
|
|
61f92af1cf | ||
|
|
a72778d364 | ||
|
|
5ae17037a3 | ||
|
|
02f0da20b0 | ||
|
|
b41631c4e6 | ||
|
|
0e49d9a6b0 | ||
|
|
4a7d108ab3 | ||
|
|
3cfd000849 | ||
|
|
1b38185361 | ||
|
|
9cb9a5df77 | ||
|
|
5035536e3f | ||
|
|
3e12bc583a | ||
|
|
e568c2233e | ||
|
|
061a75edd6 | ||
|
|
82c4d7b0ce | ||
|
|
136dadde95 | ||
|
|
0c14841585 | ||
|
|
0eebf34d9d | ||
|
|
cf186b77a7 | ||
|
|
a3372437bf | ||
|
|
4c57b4853d | ||
|
|
38eb2968ab | ||
|
|
bea56c9569 | ||
|
|
7e508ff2cf | ||
|
|
563772eda4 | ||
|
|
0533915aad | ||
|
|
c3a227d1c4 | ||
|
|
f6c903e708 | ||
|
|
7dc011c063 | ||
|
|
4e3b303016 | ||
|
|
7e1f5447e7 | ||
|
|
7e3472758b | ||
|
|
328a22e175 | ||
|
|
417b453699 | ||
|
|
6ea7190a3e | ||
|
|
b54b08c91b | ||
|
|
c30943b1c0 | ||
|
|
2abf7cab80 | ||
|
|
4137196899 | ||
|
|
019839faaa | ||
|
|
f52183a878 | ||
|
|
750b9ff032 | ||
|
|
28602e747c | ||
|
|
6cc37c69e2 | ||
|
|
a5cd0eb8a4 | ||
|
|
c23e266427 | ||
|
|
651acffbe5 | ||
|
|
71bd93b89c | ||
|
|
6da620de58 | ||
|
|
bdceea7afd | ||
|
|
d80a39cec8 | ||
|
|
5b5fae5f20 | ||
|
|
01b06aedcf | ||
|
|
c711383811 | ||
|
|
17cc153435 | ||
|
|
67446fd49b | ||
|
|
325bb615a7 | ||
|
|
ee5cd8418e | ||
|
|
342609a1b4 | ||
|
|
f270cf1a26 | ||
|
|
371c3b796c | ||
|
|
6b7ceee1b9 | ||
|
|
fdb20a27a3 | ||
|
|
2c94198eb6 | ||
|
|
e8110b8125 | ||
|
|
c39fd7b1ca | ||
|
|
a9c09a7c62 | ||
|
|
82beaabb41 | ||
|
|
63b4295d20 | ||
|
|
312a3f389b | ||
|
|
609af1ae1c | ||
|
|
4cd759f73d | ||
|
|
e156e70281 | ||
|
|
9b464929fe | ||
|
|
0c176d7bde | ||
|
|
7a3f0c00ad | ||
|
|
7aefc49c40 | ||
|
|
741dd8ea65 | ||
|
|
76adc82068 | ||
|
|
bd1512d196 | ||
|
|
9a4acbfaf5 | ||
|
|
ad1f4e7902 | ||
|
|
b328295910 | ||
|
|
828b2a5cd9 | ||
|
|
2ff7cbeaaa | ||
|
|
b2f7738830 | ||
|
|
dc0279532a | ||
|
|
0c59d02bdc | ||
|
|
0f72beb515 | ||
|
|
d781e29316 | ||
|
|
3b3e8ed332 | ||
|
|
dcdfeb33d2 | ||
|
|
0d85c3a732 | ||
|
|
903d136942 | ||
|
|
9d584da7d0 | ||
|
|
31752f76f7 | ||
|
|
5f1b2aea80 | ||
|
|
4479600d57 | ||
|
|
a90189c3ad | ||
|
|
d8a1caf04f | ||
|
|
cb33d389ed | ||
|
|
967e0955f0 | ||
|
|
e01b432ad3 | ||
|
|
fd91257c40 | ||
|
|
c7b959ce38 | ||
|
|
75eac8961e | ||
|
|
3b7d9aa487 | ||
|
|
1f4b722b00 | ||
|
|
f6519f89b0 | ||
|
|
24af85298e | ||
|
|
e721d857c2 | ||
|
|
5c17f0a67a | ||
|
|
4fcaa4f4a5 | ||
|
|
536f819eda | ||
|
|
a662489877 | ||
|
|
a2973eb597 | ||
|
|
4e21b3a94f | ||
|
|
b703ebeeaf | ||
|
|
b84a5f0337 | ||
|
|
a1ec9a7553 | ||
|
|
91d644b5ba | ||
|
|
5d6c3d6a66 | ||
|
|
1ebb4717df | ||
|
|
cf5881fc4d | ||
|
|
fcd817a326 | ||
|
|
031ec536f0 | ||
|
|
668db403f9 | ||
|
|
b9ad101926 | ||
|
|
435911029f | ||
|
|
699ed30cee | ||
|
|
9eab37dca0 | ||
|
|
9a8a12b7d8 | ||
|
|
a4c2ab35c1 | ||
|
|
3d9c4bf09a | ||
|
|
8b8a39e279 | ||
|
|
82393e2bb2 | ||
|
|
2eb99a4b98 | ||
|
|
6abce58a12 | ||
|
|
990e6e8fa3 | ||
|
|
bfd88516eb | ||
|
|
d8b7e80d29 | ||
|
|
37120974dc | ||
|
|
42fc93c709 | ||
|
|
a625e56543 | ||
|
|
9b738b2caa | ||
|
|
90bb5667bf | ||
|
|
d3d3e2e3aa | ||
|
|
37ca7b22b5 | ||
|
|
50f84a9ae1 | ||
|
|
ff29bf81f8 | ||
|
|
b25f753397 | ||
|
|
6a5d6de1e3 | ||
|
|
1c31a5b0e0 | ||
|
|
4f5cdf7c9b | ||
|
|
f09a767d31 | ||
|
|
cc8034cc4c | ||
|
|
50506cb607 | ||
|
|
aa8d2d5be6 | ||
|
|
114e6025b0 | ||
|
|
fda2717ef9 | ||
|
|
937511dfc0 | ||
|
|
d5c181a14e | ||
|
|
e8ce2375e0 | ||
|
|
6fdb39ded1 | ||
|
|
8e3a2bd620 | ||
|
|
a06bf87a2c | ||
|
|
ee4337d100 | ||
|
|
cff551c0b0 | ||
|
|
63b728f06f | ||
|
|
3793090b1b | ||
|
|
6d02b9a392 | ||
|
|
2c740cf28d | ||
|
|
5214f1e31d | ||
|
|
5d0f84d32c | ||
|
|
ee223abb88 | ||
|
|
21d0c33ecd | ||
|
|
8b6d9406db | ||
|
|
686f98816e | ||
|
|
0fa6b17dcc | ||
|
|
472404953a | ||
|
|
ae4ddf9efa | ||
|
|
ea8ed40b2f | ||
|
|
71bb016160 | ||
|
|
179ffab69c | ||
|
|
deb85c32bb | ||
|
|
92366d189e | ||
|
|
81413c0165 | ||
|
|
1e2eb4b40d | ||
|
|
01003d072c | ||
|
|
5003e4283b | ||
|
|
123c781044 | ||
|
|
a641b24592 | ||
|
|
e68dd1921a | ||
|
|
6953d8e95a | ||
|
|
967c9076a3 | ||
|
|
b3613d36da | ||
|
|
53472df857 | ||
|
|
2549e113b8 | ||
|
|
b15c44cd36 | ||
|
|
f93ded9852 | ||
|
|
89ea063eeb | ||
|
|
44b2264fea | ||
|
|
cb5a470635 | ||
|
|
17d1900581 | ||
|
|
5d501a0901 | ||
|
|
c13722480b | ||
|
|
e7d34c03f2 | ||
|
|
264cd00fff | ||
|
|
a4a6b7b80f | ||
|
|
aebb42d32b | ||
|
|
b4ef6a0038 | ||
|
|
5d235ca7f6 | ||
|
|
c3459d24f1 | ||
|
|
e3778cce0e | ||
|
|
ad607563a2 | ||
|
|
236cb2131b | ||
|
|
66d041f250 | ||
|
|
f3cb54e6d9 | ||
|
|
0aeb9a106e | ||
|
|
fd8102820c | ||
|
|
bfdf891fd3 | ||
|
|
3fa3ff1bc3 | ||
|
|
0a0110fc6b | ||
|
|
852fad922f | ||
|
|
fc68d52bb9 | ||
|
|
dde9fe9788 | ||
|
|
a230068ff7 | ||
|
|
6a75040278 | ||
|
|
c514b0ec65 | ||
|
|
eb97f46e8b | ||
|
|
c90d16cf36 | ||
|
|
ab6ca04802 | ||
|
|
f3003531a5 | ||
|
|
146672254e | ||
|
|
999079b454 | ||
|
|
02fb980451 | ||
|
|
8a06999ba0 | ||
|
|
80dcee5cd5 | ||
|
|
9550ca506f | ||
|
|
30eecc6a04 | ||
|
|
dbd82a1d4f | ||
|
|
76f0c50d3d | ||
|
|
dc519b5421 | ||
|
|
ae12bc3ebb | ||
|
|
e327b736ca | ||
|
|
82b69a5cbb | ||
|
|
11465da702 | ||
|
|
578c074575 | ||
|
|
8cdb5c8453 | ||
|
|
2b1b2d83ca | ||
|
|
c3040bd00a | ||
|
|
8c1aa28c27 | ||
|
|
50b9dd7344 | ||
|
|
78d7ee19dc | ||
|
|
892015b088 | ||
|
|
47f2d01a5a | ||
|
|
33a513faf7 | ||
|
|
720334659a | ||
|
|
240384afe6 | ||
|
|
6722ebd437 | ||
|
|
957e0db1d2 | ||
|
|
804afc5871 | ||
|
|
00d24327ef | ||
|
|
9a605c8859 | ||
|
|
402ca40c9d | ||
|
|
30bd1c16c8 | ||
|
|
721f5a277c | ||
|
|
6fb8ace671 | ||
|
|
ae37338e68 | ||
|
|
03c2c162f9 | ||
|
|
52c3a6e49d | ||
|
|
4e16c1f80b | ||
|
|
7ccb2b84dd | ||
|
|
0a192fbea7 | ||
|
|
a526167d40 | ||
|
|
f78546272c | ||
|
|
c137cc0d33 | ||
|
|
6e4b8b2891 | ||
|
|
5dadae079b | ||
|
|
cd08d806b1 | ||
|
|
5f9f87c06f | ||
|
|
387db16a78 | ||
|
|
36e6f62cd0 | ||
|
|
755ff8d22c | ||
|
|
7b3a19e533 | ||
|
|
4f13f8f798 | ||
|
|
feb7711cf5 | ||
|
|
589c33dade | ||
|
|
e572a1010b | ||
|
|
7e0dc61334 | ||
|
|
8e82ecfe8f | ||
|
|
ec29539e06 | ||
|
|
8cd9614abf | ||
|
|
324ac0a243 | ||
|
|
3711304510 | ||
|
|
50b936936d | ||
|
|
d97da29da2 | ||
|
|
7687b354c5 | ||
|
|
36d7281037 | ||
|
|
865d1fbafc | ||
|
|
ac21e71968 | ||
|
|
943a1e24b8 | ||
|
|
50f01302d3 | ||
|
|
0198807ef9 | ||
|
|
6856139705 | ||
|
|
c93153852f | ||
|
|
ab9c7214ee | ||
|
|
dae69640d0 | ||
|
|
edeb3e7cb1 | ||
|
|
5c43afd40f | ||
|
|
9170ca5b16 | ||
|
|
65d49afa48 | ||
|
|
ab03c0b47c | ||
|
|
7690787553 | ||
|
|
a65402ef42 | ||
|
|
7033bc1a51 | ||
|
|
89d5fbf354 | ||
|
|
8c3533ba97 | ||
|
|
44d6dd08b2 | ||
|
|
cc449417c4 | ||
|
|
497f5fd93f | ||
|
|
7308b8cb3d | ||
|
|
4211c83aa4 | ||
|
|
d01949dc89 | ||
|
|
63a6494834 | ||
|
|
8bea039b83 | ||
|
|
d65889bbc0 | ||
|
|
4a8963770e | ||
|
|
5b0aa2c7b1 | ||
|
|
b6aa99aff8 | ||
|
|
4bf5614195 | ||
|
|
0be30bafa4 | ||
|
|
7b091c370c | ||
|
|
334b5c3b72 | ||
|
|
b7cedb1604 | ||
|
|
2038ad6ee7 | ||
|
|
b243340f0c | ||
|
|
8cc83d301d | ||
|
|
d762f86e94 | ||
|
|
264b23e1a4 | ||
|
|
a6e0afa2bb | ||
|
|
4285a47f40 | ||
|
|
e36963e0eb | ||
|
|
dedd35c6bc | ||
|
|
608945d44a | ||
|
|
b1bf063503 | ||
|
|
14bddf35fb | ||
|
|
ef6c868f23 | ||
|
|
6682049dee | ||
|
|
b0f001a6cb | ||
|
|
dd67702a3e | ||
|
|
05a3879f1c | ||
|
|
4a7b790384 | ||
|
|
09ff81316e | ||
|
|
c88aec845a | ||
|
|
77a54b6a65 | ||
|
|
575036b405 | ||
|
|
f6dfd6603a | ||
|
|
e04edad621 | ||
|
|
f322bfb063 | ||
|
|
014e880372 | ||
|
|
01d22d4703 | ||
|
|
48aae2d2cf | ||
|
|
c571dea953 | ||
|
|
8b172c2e10 | ||
|
|
0a67a3632b | ||
|
|
985e4fdc07 | ||
|
|
1e399778ee | ||
|
|
2e022397c4 | ||
|
|
02835c6bf4 | ||
|
|
91816e8f16 | ||
|
|
10c38c7ca2 | ||
|
|
94a773feb9 | ||
|
|
448ef1f31c | ||
|
|
49941c4e4f | ||
|
|
80f48920c8 | ||
|
|
520e753390 | ||
|
|
355c7ad361 | ||
|
|
5a11b793fe | ||
|
|
7593fbaa12 | ||
|
|
2eb0f72a0e | ||
|
|
8e5b121948 | ||
|
|
648e6a1ffe | ||
|
|
55af2b26e0 | ||
|
|
583882fdce | ||
|
|
9eb31b265f | ||
|
|
ddeb1b3de2 | ||
|
|
59fe4824f8 | ||
|
|
dd8417526b | ||
|
|
09670d5ba4 | ||
|
|
41a7b00f18 | ||
|
|
d90e40305b | ||
|
|
350c948133 | ||
|
|
e5e9966199 | ||
|
|
fbd9f6ea80 | ||
|
|
6df7179e6c | ||
|
|
36eb802baf | ||
|
|
2ccb37beb9 | ||
|
|
246ce10858 | ||
|
|
ba717dca97 | ||
|
|
1e52776ac3 | ||
|
|
8daeeedc06 | ||
|
|
cce9d15d01 | ||
|
|
dd414c970b | ||
|
|
6744f36db7 | ||
|
|
77302fe5c9 | ||
|
|
497ca088a6 | ||
|
|
90bddb6cdd | ||
|
|
fafc7950e2 | ||
|
|
a13d06de42 | ||
|
|
1db82381e3 | ||
|
|
cb8961eeed | ||
|
|
af98f8ff37 | ||
|
|
caf80631f0 | ||
|
|
1812afb7b3 | ||
|
|
9fb66c780c | ||
|
|
ab953c64a0 | ||
|
|
db0a8ad979 | ||
|
|
1c29e81e62 | ||
|
|
7a6d76a64d | ||
|
|
4aa353673b | ||
|
|
f49b3d6efc | ||
|
|
36bb63fad1 | ||
|
|
1f36085df9 | ||
|
|
26669ea3cf | ||
|
|
db8e38b8cf | ||
|
|
e09f58b3bc | ||
|
|
3dc582e5ea | ||
|
|
506e261d20 | ||
|
|
b30c4992a9 | ||
|
|
3eeff489e8 | ||
|
|
5946cda7c6 | ||
|
|
ee2edd838a | ||
|
|
73e732eb6b | ||
|
|
cd7364a89c | ||
|
|
57d1db8dd0 | ||
|
|
964e7b2dd0 | ||
|
|
f101079ae0 | ||
|
|
0f61db4469 | ||
|
|
1bd3903582 | ||
|
|
da4daed5ef | ||
|
|
7d49502ab0 | ||
|
|
03e3b4e119 | ||
|
|
d8348c351d | ||
|
|
70cb4d51c9 | ||
|
|
7c84562945 | ||
|
|
68f3b61f0e | ||
|
|
08fd433f3e | ||
|
|
35a3ff1d33 | ||
|
|
fff496c689 | ||
|
|
e5c209a1bc | ||
|
|
75f105d455 | ||
|
|
00cde0b8dc | ||
|
|
58cd7e173e | ||
|
|
f4076bb736 | ||
|
|
6f7893653c | ||
|
|
55ebae26f9 | ||
|
|
ae8bdfd1a1 | ||
|
|
6a74719060 | ||
|
|
f6295bcb04 | ||
|
|
78f9d84318 | ||
|
|
b1ec70e4a9 | ||
|
|
c936d8cc7b | ||
|
|
e6174ee975 | ||
|
|
f790c43f6e | ||
|
|
8119597d6f | ||
|
|
43abd79950 | ||
|
|
97ae4d166c | ||
|
|
87cc0fbd18 | ||
|
|
baf39a1aa8 | ||
|
|
975977860d | ||
|
|
b5d48cb1ef | ||
|
|
de66571371 | ||
|
|
dab062fb6e | ||
|
|
6a959f2e52 | ||
|
|
47c165c3a9 | ||
|
|
4180a3d8b7 | ||
|
|
05ad5409b4 | ||
|
|
1ef1563649 | ||
|
|
ef47b2c15f | ||
|
|
57935b2564 | ||
|
|
54a5428518 | ||
|
|
9d5fb3b58d | ||
|
|
cbb7287204 | ||
|
|
0553d0ee40 | ||
|
|
7faf7e7523 | ||
|
|
4356d907c1 | ||
|
|
c677e49bd1 | ||
|
|
96c4855331 | ||
|
|
b90b0c4ffa | ||
|
|
c725e2c8b0 | ||
|
|
db7a28eccb | ||
|
|
6a11bb77ba | ||
|
|
181f814e57 | ||
|
|
bd5376c182 | ||
|
|
ecf6de5b02 | ||
|
|
139f27827e | ||
|
|
945e5c56e3 | ||
|
|
fc10824cb6 | ||
|
|
83a5668694 | ||
|
|
f648e682a7 | ||
|
|
f2dbc54066 | ||
|
|
86be82610c | ||
|
|
4810c48d6d | ||
|
|
c4af7684d8 | ||
|
|
fcc2546269 | ||
|
|
40fbb05e1c | ||
|
|
dc5756fd77 | ||
|
|
41db733308 | ||
|
|
0bf219889e | ||
|
|
f2a7ed77ef | ||
|
|
4853eb63fe | ||
|
|
5820c4a29e | ||
|
|
7fd4ed9939 | ||
|
|
88c86d211b | ||
|
|
5d84b79a30 | ||
|
|
140ac73965 | ||
|
|
2a27e66234 | ||
|
|
e759a00119 | ||
|
|
9d5332518c | ||
|
|
90ab741e90 | ||
|
|
96229998c2 | ||
|
|
0659dfccfe | ||
|
|
9c544e2537 | ||
|
|
d7fc56318b | ||
|
|
4bba371644 | ||
|
|
ef5acfe32d | ||
|
|
30787f7259 | ||
|
|
85557f635a | ||
|
|
60d23e5e59 | ||
|
|
97d5bfcba6 | ||
|
|
c233e6bcc3 | ||
|
|
28809ab07a | ||
|
|
bad84757eb | ||
|
|
13118a50b8 | ||
|
|
5495937f46 | ||
|
|
adccf33632 | ||
|
|
b203095d4c | ||
|
|
f3b098fb90 | ||
|
|
af17794c65 | ||
|
|
8fc226ef99 | ||
|
|
3bb3f04108 | ||
|
|
59a9efe85b | ||
|
|
0facd2af3e | ||
|
|
7d0ada5ff9 | ||
|
|
44451f22d5 | ||
|
|
06c6efa970 | ||
|
|
e5851b963a | ||
|
|
4de6131090 | ||
|
|
3a1341a7bc | ||
|
|
c78e48177c | ||
|
|
6edaa0e25b | ||
|
|
fb97809e64 | ||
|
|
0c996b9f48 | ||
|
|
acfb717a18 | ||
|
|
647eab4541 | ||
|
|
1e5bcdec02 | ||
|
|
e7d8e98a9f | ||
|
|
2b3f951a2e | ||
|
|
6751a1284d | ||
|
|
b83831df1f | ||
|
|
f540b93706 | ||
|
|
8466336104 | ||
|
|
f88f1b40ce | ||
|
|
386a7b52d5 | ||
|
|
2e885de796 | ||
|
|
687c04cbb8 | ||
|
|
40c931de4b | ||
|
|
93bc7ef165 | ||
|
|
ee2d190253 | ||
|
|
aedb930cfc | ||
|
|
c596ce91cd | ||
|
|
c01e1a96aa | ||
|
|
8a64969404 | ||
|
|
c254f75bbb | ||
|
|
86692c019c | ||
|
|
1ab1c4ef57 | ||
|
|
926fb62eec | ||
|
|
817690ff73 | ||
|
|
98e1c935a1 | ||
|
|
f30e9976d6 | ||
|
|
80e98aed69 | ||
|
|
6a24cb3d22 | ||
|
|
e13b9e7885 | ||
|
|
dd467d33d0 | ||
|
|
c6b8f4d0c9 | ||
|
|
95240b8093 | ||
|
|
2f962d0a91 | ||
|
|
3c63e1bb57 | ||
|
|
c471b34575 | ||
|
|
d045f0bdb7 | ||
|
|
22becac4bd | ||
|
|
9d632b1b27 | ||
|
|
95c5e10103 | ||
|
|
a5d09d684e | ||
|
|
8aab976bbd | ||
|
|
26c6d1922e | ||
|
|
cd1bb54990 | ||
|
|
d4cd06138c | ||
|
|
961c5cbf17 | ||
|
|
b65e5bb72f | ||
|
|
54914380c0 | ||
|
|
26ccc68bed | ||
|
|
ee3d5a6d47 | ||
|
|
46fde8a1a2 | ||
|
|
fe1d858e35 | ||
|
|
fc42bc6ec9 | ||
|
|
fe6ad195ae | ||
|
|
7193650641 | ||
|
|
5db34f680f | ||
|
|
a82ba8d0ce | ||
|
|
3706fb5dc8 | ||
|
|
08bea4adde | ||
|
|
4c917d0314 | ||
|
|
4866b72eb2 | ||
|
|
2d00be0477 | ||
|
|
3d09aa4c82 | ||
|
|
c44c7895b8 | ||
|
|
8de28761c4 | ||
|
|
711762f0b7 | ||
|
|
5773803961 | ||
|
|
6aeba407db | ||
|
|
140359fc2c | ||
|
|
8ddf48d59f | ||
|
|
2e40a12225 | ||
|
|
dade7245af | ||
|
|
1f9fb20fcd | ||
|
|
0940c5b4c6 | ||
|
|
42ca72dff3 | ||
|
|
2949a6cda9 | ||
|
|
882fc9052e | ||
|
|
9b166fc1f8 | ||
|
|
d4364f30bd | ||
|
|
857421024d | ||
|
|
80faa7a152 | ||
|
|
545a23f11b | ||
|
|
caedb0721e | ||
|
|
47024eb564 | ||
|
|
9c58885c70 | ||
|
|
9fbd4b35a2 | ||
|
|
05b476a270 | ||
|
|
4395ca2e04 | ||
|
|
19f93d906e | ||
|
|
57565375c8 | ||
|
|
eb11cbe867 | ||
|
|
53407e3f38 | ||
|
|
b306c439d7 | ||
|
|
f102819463 | ||
|
|
b942db3dc3 | ||
|
|
78f9fb902b | ||
|
|
d8fef8faac | ||
|
|
8ea6bd2802 | ||
|
|
c659022b5c | ||
|
|
8ca2e93e1a | ||
|
|
5600e214c3 | ||
|
|
6400f8ec0f | ||
|
|
c3a4e2ec40 | ||
|
|
e28c794699 | ||
|
|
da9f180835 | ||
|
|
6b8ce312e3 | ||
|
|
de3fc356e1 | ||
|
|
d0fed4ac02 | ||
|
|
7ce50a355c | ||
|
|
9612f23399 | ||
|
|
cccedc1aa4 | ||
|
|
c430802e32 | ||
|
|
cb4e421901 | ||
|
|
8e97596b7b | ||
|
|
92085e7099 | ||
|
|
c6aa838b51 | ||
|
|
9f5e8d16b3 | ||
|
|
82c06a40ac | ||
|
|
4423eba49b | ||
|
|
5b4c54631a | ||
|
|
5a1a2e9454 | ||
|
|
f005f96ea5 | ||
|
|
5e39123b3b | ||
|
|
393ca8c94d | ||
|
|
f817adc468 | ||
|
|
6c91a5a7f5 | ||
|
|
749b09616d | ||
|
|
5de5ab89b4 | ||
|
|
1d67c96640 | ||
|
|
d1c694ea4a | ||
|
|
06368a232a | ||
|
|
8a7bbd1606 | ||
|
|
131d05033b | ||
|
|
1806a75415 | ||
|
|
659ffe204c | ||
|
|
4647fd8910 | ||
|
|
d492dad8f4 | ||
|
|
3368d70dce | ||
|
|
0e1b2566ff | ||
|
|
369e60162e | ||
|
|
d5e7657fe2 | ||
|
|
f84ce1ebaf | ||
|
|
12bc242944 | ||
|
|
88060cce10 | ||
|
|
272e4db5c7 | ||
|
|
6e21cc3b67 | ||
|
|
0391bc8176 | ||
|
|
3b9264a049 | ||
|
|
2b3c254678 | ||
|
|
287be8c615 | ||
|
|
953fed280f | ||
|
|
e2ff3df314 | ||
|
|
31208a07c2 | ||
|
|
ac7a1b0dfb | ||
|
|
c246773599 | ||
|
|
25cd56a715 | ||
|
|
82c18e2a53 | ||
|
|
d5d38d16ae | ||
|
|
e1cbf33573 | ||
|
|
2ffe3bc14b | ||
|
|
d5867276a9 | ||
|
|
f665ef8fc5 | ||
|
|
b264c21302 | ||
|
|
349b3a2ea0 | ||
|
|
87813a8570 | ||
|
|
aab135516b | ||
|
|
141ba36996 | ||
|
|
d434ca5448 | ||
|
|
94e507aea7 | ||
|
|
3ebc121293 | ||
|
|
41ebd6530b | ||
|
|
2ec7b7b79b | ||
|
|
60ed60353b | ||
|
|
586f1cc532 | ||
|
|
73eb13dfc7 | ||
|
|
ed1269000f | ||
|
|
689fb748ee | ||
|
|
1721fef28b | ||
|
|
364ca0582e | ||
|
|
133a2b4ac2 | ||
|
|
d85187eb74 | ||
|
|
cc1ac11017 | ||
|
|
73f536439e | ||
|
|
b17e7d9a9b | ||
|
|
2f29b758e0 | ||
|
|
482aa3fecc | ||
|
|
d9c19db340 | ||
|
|
6c4d243de5 | ||
|
|
d1561ef777 | ||
|
|
7b4137c351 | ||
|
|
1072336249 | ||
|
|
75bb5c7028 | ||
|
|
376e1ad081 | ||
|
|
b58a22b963 | ||
|
|
47004d9579 | ||
|
|
12810c9cd3 | ||
|
|
7a459170fa | ||
|
|
3cf0df568a | ||
|
|
b88ebd472e | ||
|
|
436416afe2 | ||
|
|
64997815c4 | ||
|
|
8b55cadc83 | ||
|
|
3ecc527209 | ||
|
|
b1b7d1ffba | ||
|
|
4003bd82b0 | ||
|
|
8801255d7d | ||
|
|
3b18f539a7 | ||
|
|
c67a055d16 | ||
|
|
bc973e06d0 | ||
|
|
aeb3c8a0e8 | ||
|
|
cf33a47df0 | ||
|
|
daeb0f04cd | ||
|
|
97243fe395 | ||
|
|
9dbdb65abe | ||
|
|
9af461de35 | ||
|
|
4d71e200c6 | ||
|
|
8e0bdabed2 | ||
|
|
bca553caac | ||
|
|
a2f42a3baf | ||
|
|
7465222a9c | ||
|
|
e28034c5ac | ||
|
|
12bbd32ad0 | ||
|
|
266e466ee4 | ||
|
|
cf83f532ae | ||
|
|
cd019668dc | ||
|
|
515fc8776f | ||
|
|
c7c0996d8c | ||
|
|
b3e64671cc | ||
|
|
4abe214499 | ||
|
|
e94cb5ae7e | ||
|
|
e213c98df1 | ||
|
|
1639282434 | ||
|
|
be0e5dbd83 | ||
|
|
ad72917274 | ||
|
|
6a3f4c3f82 | ||
|
|
a6420bf50c | ||
|
|
eb387896e9 | ||
|
|
f43c163158 | ||
|
|
673bf566fc | ||
|
|
f95c5e1218 | ||
|
|
f33f32f159 | ||
|
|
8df5ae15d1 | ||
|
|
75b399f455 | ||
|
|
12439dd5ec | ||
|
|
3513d41436 | ||
|
|
cab792abe5 | ||
|
|
8870358b1b | ||
|
|
ee087c79ad | ||
|
|
51f579b635 | ||
|
|
c23c3d7d7d | ||
|
|
4abf617b9c | ||
|
|
486375154c | ||
|
|
3026164b16 | ||
|
|
9dd73ef4a4 | ||
|
|
75c72a1e67 | ||
|
|
08354db47b | ||
|
|
027eb5a6b0 | ||
|
|
f71264490c | ||
|
|
6270239a6d | ||
|
|
1195a38f46 | ||
|
|
66e289bab4 | ||
|
|
52c6f26cab | ||
|
|
dc534b674f | ||
|
|
f30c2e8e98 | ||
|
|
c482b3c69a | ||
|
|
266b0ad676 | ||
|
|
87f70ab39d | ||
|
|
8e636da499 | ||
|
|
22889ab175 | ||
|
|
5d2354f177 | ||
|
|
a41fb80ce1 | ||
|
|
2e2575e213 | ||
|
|
8e2898edf9 | ||
|
|
26c61e0809 | ||
|
|
e7a8c3032d | ||
|
|
b477da2094 | ||
|
|
725d1c58aa | ||
|
|
bd6742137f | ||
|
|
9f4921bfa0 | ||
|
|
e8dcfa3d69 | ||
|
|
88720ed09b | ||
|
|
1e804244d0 | ||
|
|
198492bbf0 | ||
|
|
8f9d522f62 | ||
|
|
cbae233aba | ||
|
|
b17ca9c945 | ||
|
|
ebf4ca39ba | ||
|
|
e5e78797e6 | ||
|
|
080997b808 | ||
|
|
77306e8b97 | ||
|
|
6917d2a2f0 | ||
|
|
36c15522c1 | ||
|
|
804c343a4f | ||
|
|
cd5d75427e | ||
|
|
5ddc127da6 | ||
|
|
f859695b49 | ||
|
|
cb3d2eb9e9 | ||
|
|
33eae08f04 | ||
|
|
aa3f98677d | ||
|
|
fffccaaf41 | ||
|
|
cdc8d0c373 | ||
|
|
d14f0c45fc | ||
|
|
39955b0451 | ||
|
|
52dfb7ffe2 | ||
|
|
93462856e1 | ||
|
|
615f155a3a | ||
|
|
fcd9e423ec | ||
|
|
db8f2bfd99 | ||
|
|
55801fc76e | ||
|
|
d3d89c3256 | ||
|
|
8875b3d572 | ||
|
|
aabc2be693 | ||
|
|
c9afb51cea | ||
|
|
c0a656876c | ||
|
|
17a647630b | ||
|
|
c88e118b3c | ||
|
|
ae6a802106 | ||
|
|
b184f94413 | ||
|
|
ee3ec091f4 | ||
|
|
ef49b59053 | ||
|
|
1f8125805e | ||
|
|
efd712c69b | ||
|
|
109a4156e1 | ||
|
|
678d33295b | ||
|
|
5e58956d0a | ||
|
|
e276fd2cb3 | ||
|
|
9b22cb10c4 | ||
|
|
8ca31a0e05 | ||
|
|
20149a5da1 | ||
|
|
054d43bb11 | ||
|
|
65488b820c | ||
|
|
c3c9f87954 | ||
|
|
56f447be9f | ||
|
|
79fa9db0da | ||
|
|
071c10137b | ||
|
|
a4962b80d6 | ||
|
|
5307c33232 | ||
|
|
1b660cce12 | ||
|
|
8df8c278b6 | ||
|
|
d7e8264517 | ||
|
|
f11c316347 | ||
|
|
f62e02c24f | ||
|
|
70113c38c9 | ||
|
|
3d8132f5e2 | ||
|
|
39affb5aa4 | ||
|
|
a882c5f474 | ||
|
|
61a7ff1622 | ||
|
|
42e7373bd3 | ||
|
|
e269d3ae7d | ||
|
|
e7ddaef5bd | ||
|
|
62984e4584 | ||
|
|
3c53455d15 | ||
|
|
bbb43a39fd | ||
|
|
43e7d3c945 | ||
|
|
2f72e83bbd | ||
|
|
57179b4ca1 | ||
|
|
4bc8eec4eb | ||
|
|
baf510bf8c | ||
|
|
6d53cdd6ce | ||
|
|
ebbf078c7d | ||
|
|
95e431e9ec | ||
|
|
eba470f2f2 | ||
|
|
11addc50ff | ||
|
|
e4df2f98cc | ||
|
|
e7c14660d3 | ||
|
|
90076b6172 | ||
|
|
28b83495d8 | ||
|
|
551c7837ac | ||
|
|
59e6acc757 | ||
|
|
9990c960f2 | ||
|
|
2006a06eff | ||
|
|
2b6bda1ed8 | ||
|
|
468083d2f5 | ||
|
|
483fc223bb | ||
|
|
66ce97024d | ||
|
|
8c97f81943 | ||
|
|
d7c1630570 | ||
|
|
5e1a5ac8de | ||
|
|
9eb4ab6ad9 | ||
|
|
4932a817a0 | ||
|
|
5d003e29b1 | ||
|
|
dc95bd503e | ||
|
|
f738dd7b7c | ||
|
|
f908b74fa3 | ||
|
|
c687ac745b | ||
|
|
912e0b7e46 | ||
|
|
03bc7237ad | ||
|
|
061f62da54 | ||
|
|
dd565ac1ad | ||
|
|
5cdefc4625 | ||
|
|
ce00af8767 | ||
|
|
51047444aa | ||
|
|
aa6cd05ed8 | ||
|
|
dac14bf311 | ||
|
|
05fe2594e4 | ||
|
|
26e1c3514f | ||
|
|
22c83245c5 | ||
|
|
7900aede14 | ||
|
|
f877c6ae5a | ||
|
|
ca681f7041 | ||
|
|
a01da8bbf8 | ||
|
|
f3a65d9636 | ||
|
|
559f4c550f | ||
|
|
03c635a4b5 | ||
|
|
34a4cd0a34 | ||
|
|
3b9b32f404 | ||
|
|
9c724a9802 | ||
|
|
7a6e8a1b17 | ||
|
|
369c12e038 | ||
|
|
0fa5795b85 | ||
|
|
c00c7c0af0 | ||
|
|
cbaed4bb5e | ||
|
|
f74a7348f6 | ||
|
|
8626b23e4e | ||
|
|
0086874277 | ||
|
|
7fc18d9309 | ||
|
|
974f1a385a | ||
|
|
6900b4f6f5 | ||
|
|
d90e4bdb74 | ||
|
|
276c989772 | ||
|
|
ea99110d24 | ||
|
|
221a59fe6f | ||
|
|
eaa5646483 | ||
|
|
041bc3adc5 | ||
|
|
e64b756943 | ||
|
|
201ea3ee8e | ||
|
|
9303ce3e69 | ||
|
|
06c085ab6e | ||
|
|
c576ef1e7c | ||
|
|
11bed5827d | ||
|
|
fab83e2456 | ||
|
|
1d25e9d173 | ||
|
|
9c21f22923 | ||
|
|
3aa697f993 | ||
|
|
8b9848ac56 | ||
|
|
8b8c1093b6 | ||
|
|
d0d6c097fc | ||
|
|
6be5e46994 | ||
|
|
45694b504a | ||
|
|
41dbc50f9c | ||
|
|
4d2ad866f3 | ||
|
|
3cafca04aa | ||
|
|
594f51b859 | ||
|
|
fb56131dd9 | ||
|
|
a34e19629c | ||
|
|
3c12a027d4 | ||
|
|
cb28e03386 | ||
|
|
7393746da2 | ||
|
|
6828c809e4 | ||
|
|
28479149cc | ||
|
|
237c03c8ea | ||
|
|
e73c85cb23 | ||
|
|
b6b2711298 | ||
|
|
3b7130439a | ||
|
|
2c919adb74 | ||
|
|
60231c65b9 | ||
|
|
f196047832 | ||
|
|
240ca32e57 | ||
|
|
fa37c26c4d | ||
|
|
d7dbfc7cc1 | ||
|
|
d9ab5262b1 | ||
|
|
fb124e3741 | ||
|
|
479bf783d2 | ||
|
|
f0f3a6c99d | ||
|
|
f57b7835e2 | ||
|
|
1df3186e0e | ||
|
|
0b7c27828d | ||
|
|
0a19d4ccd6 | ||
|
|
9f3da13860 | ||
|
|
bf812ef714 | ||
|
|
b1ac38fadc | ||
|
|
fb0d12c6cb | ||
|
|
34952f09e1 | ||
|
|
34a7de2970 | ||
|
|
0ff827419e | ||
|
|
b29440aee6 | ||
|
|
11b5605815 | ||
|
|
844587669e | ||
|
|
f6c3664d71 | ||
|
|
c5864a8ce6 | ||
|
|
27c7114af6 | ||
|
|
0791ac1b44 | ||
|
|
1de5cd3ba5 | ||
|
|
729accb482 | ||
|
|
942acef594 | ||
|
|
fb2f339fec | ||
|
|
98044462b1 | ||
|
|
0dcb318f62 | ||
|
|
f32143469f | ||
|
|
3a30508b94 | ||
|
|
e0b9d78fab | ||
|
|
8d6765cf48 | ||
|
|
12bb392a0f | ||
|
|
08df685fe7 | ||
|
|
c8d1be772d | ||
|
|
887e9bc7b5 | ||
|
|
9f2e7c2f34 | ||
|
|
d7bb8884af | ||
|
|
464e792496 | ||
|
|
18c3281f9e | ||
|
|
8e2b1be127 | ||
|
|
b61b7787cb | ||
|
|
b465083f45 | ||
|
|
154655a85a | ||
|
|
59e89e62d7 | ||
|
|
d5d7bdaeb5 | ||
|
|
b2f82948ee | ||
|
|
428e4e4a85 | ||
|
|
1e83741c9a | ||
|
|
621d6a9516 | ||
|
|
3550821fb4 | ||
|
|
5b0c40da24 | ||
|
|
e0ac521438 | ||
|
|
c29458f3ec | ||
|
|
bf94d763ba | ||
|
|
8a37aa1517 | ||
|
|
f3d24df6f2 | ||
|
|
fd5d8270dc | ||
|
|
be612d9e0c | ||
|
|
4a7434d0b0 | ||
|
|
ad2141be2d | ||
|
|
f94639fadf | ||
|
|
89faae660f | ||
|
|
0f422256d6 | ||
|
|
acc1adbe7a | ||
|
|
8002ac9e0a | ||
|
|
6d30cf04db | ||
|
|
430b092a5f | ||
|
|
3eb5fdb581 | ||
|
|
9663bd3abb | ||
|
|
5a4d9ddb21 | ||
|
|
3be3c622dc | ||
|
|
cd6b555e19 | ||
|
|
671302b5c0 | ||
|
|
4f34cdb0a8 | ||
|
|
bd690a9f93 | ||
|
|
51f267d9d4 | ||
|
|
47f53ad958 | ||
|
|
c73cdd800f | ||
|
|
f535ec8278 | ||
|
|
238755752f | ||
|
|
c71a3195af | ||
|
|
54a9328b20 | ||
|
|
3e48522477 | ||
|
|
251a44b776 | ||
|
|
be7a8379b4 | ||
|
|
defce60385 | ||
|
|
354b4b8604 | ||
|
|
5b7dab2dd6 | ||
|
|
8a5601e42f | ||
|
|
232541df44 | ||
|
|
a346b1ff57 | ||
|
|
d96d604e53 | ||
|
|
e704f87f86 | ||
|
|
8f5639afcb | ||
|
|
03950c90f7 | ||
|
|
47a8b7c14a | ||
|
|
2a04d2c799 | ||
|
|
8de922724b | ||
|
|
67b8a28a2f | ||
|
|
51a575159a | ||
|
|
524229a297 | ||
|
|
754e70cf3e | ||
|
|
84bc4dcb0f | ||
|
|
10eaa8ef1d | ||
|
|
c3124c3085 | ||
|
|
8d5b8b477e | ||
|
|
d7d2a9a3db | ||
|
|
25a4c5a9ed | ||
|
|
5c45bbe57b | ||
|
|
d41d04c0f5 | ||
|
|
e422d7f4f7 | ||
|
|
cdc682d5a4 | ||
|
|
9cc93c64aa | ||
|
|
fa7a1cc5ef | ||
|
|
17712eeb19 | ||
|
|
41c3a5a7be | ||
|
|
8765222d22 | ||
|
|
645f814544 | ||
|
|
308cfe0ab3 | ||
|
|
e5e8d20a3a | ||
|
|
a107193e4b | ||
|
|
55eae65b39 | ||
|
|
3f125c8c70 | ||
|
|
75e8b2ac87 | ||
|
|
ee114368ad | ||
|
|
525a87f58e | ||
|
|
44cae2fb2e | ||
|
|
30a453884e | ||
|
|
3b58d94f71 | ||
|
|
8abb86fec4 | ||
|
|
16a089780e | ||
|
|
09b6468d30 | ||
|
|
80fb6d4aa4 | ||
|
|
1f04873517 | ||
|
|
799207e838 | ||
|
|
34866b4836 | ||
|
|
be530dfea2 | ||
|
|
d12a1a47d5 | ||
|
|
8d42e3501e | ||
|
|
2711e41bcd | ||
|
|
5e1eddb939 | ||
|
|
23e7f53bd3 | ||
|
|
000b6b5ae5 | ||
|
|
864f24bd2c | ||
|
|
5d8df28d27 | ||
|
|
f9a5affad9 | ||
|
|
ab81ef8fa7 | ||
|
|
95d8f7ea12 | ||
|
|
5316bf7487 | ||
|
|
a6f774e901 | ||
|
|
f171bc8b59 | ||
|
|
289bbb350e | ||
|
|
d247a2c8bf | ||
|
|
88ed52aec9 | ||
|
|
cb23bcba29 | ||
|
|
2c7ed24796 | ||
|
|
4c6bd5b5b6 | ||
|
|
aeb7b41d44 | ||
|
|
5bdec59de1 | ||
|
|
dfaba1ab95 | ||
|
|
a62fd1af27 | ||
|
|
7a89681722 | ||
|
|
51da40e621 | ||
|
|
d8f0a9ecea | ||
|
|
cf7e015f25 | ||
|
|
1af330f29f | ||
|
|
9afa1770d1 | ||
|
|
3ebbcce1c7 | ||
|
|
2c7c721933 | ||
|
|
7523647391 | ||
|
|
9700cd9097 | ||
|
|
eab7faa0c1 | ||
|
|
a56c1e38c7 | ||
|
|
40a2d17052 | ||
|
|
b14fa8e687 | ||
|
|
678e436f2e | ||
|
|
ff81c4c99c | ||
|
|
420658e6cb | ||
|
|
593ddd851b | ||
|
|
1243402657 | ||
|
|
1a117a7728 | ||
|
|
2b2ee140c3 | ||
|
|
d97f5cd795 | ||
|
|
f3f0b8e403 | ||
|
|
660f9459da | ||
|
|
10952eb2cf | ||
|
|
cdad742700 | ||
|
|
a9e8f60ef6 | ||
|
|
a8b7b26068 | ||
|
|
ba911137fa | ||
|
|
d3f007af18 | ||
|
|
2929fa0e79 | ||
|
|
297a564bee | ||
|
|
8e92d21ebf | ||
|
|
36dbca8784 | ||
|
|
d1cc05e17e | ||
|
|
3b3d531965 | ||
|
|
653789afc7 | ||
|
|
2d651a2d02 | ||
|
|
3e5f3df172 | ||
|
|
f120a7ab5e | ||
|
|
984e4d4875 | ||
|
|
53b8247cb5 | ||
|
|
59db9f8018 | ||
|
|
b73b14f72c | ||
|
|
41597d9bed | ||
|
|
b37317d8b0 | ||
|
|
87dc451108 | ||
|
|
ca4456eda8 | ||
|
|
993df6bc22 | ||
|
|
61be92e26a | ||
|
|
c59b61c0da | ||
|
|
3e214851a4 | ||
|
|
a47b602b08 | ||
|
|
a083b859e4 | ||
|
|
948199deac | ||
|
|
c356620ec1 | ||
|
|
f79ebf09a2 | ||
|
|
c7620992d2 | ||
|
|
ce1bafdce9 | ||
|
|
9872e588c8 | ||
|
|
d609edf4f1 | ||
|
|
3a99d321a8 | ||
|
|
4bb3d999ac | ||
|
|
40101dc311 | ||
|
|
e9c6deffee | ||
|
|
9c29bc69f7 | ||
|
|
1e12429564 | ||
|
|
795704f0f1 | ||
|
|
981b9cdc8c | ||
|
|
3f724339db | ||
|
|
70c857b728 | ||
|
|
9e7e0dffd5 | ||
|
|
c3dea3f878 | ||
|
|
f57f84f606 | ||
|
|
c84683c88b | ||
|
|
b68a2613f8 | ||
|
|
28afa6e77a | ||
|
|
496ce6b349 | ||
|
|
ce9512b78b | ||
|
|
4eb59a6b1c | ||
|
|
80b1ee0a4c | ||
|
|
f993afb26d | ||
|
|
7c80519cbf | ||
|
|
8250c32f49 | ||
|
|
2fe1ff8582 | ||
|
|
17ee98e1a5 | ||
|
|
2ee8f5d80f | ||
|
|
3f302bca8c | ||
|
|
c909e5820e | ||
|
|
a1b85269a4 | ||
|
|
faa1f83ab4 | ||
|
|
308c505c3d | ||
|
|
0eacd2aaae | ||
|
|
18ae46ad4b | ||
|
|
65c2b21df1 | ||
|
|
772acaf31f | ||
|
|
f8d0745e27 | ||
|
|
d719c6a5ab | ||
|
|
769efa16af | ||
|
|
86b4e98ac6 | ||
|
|
3bf8c316a6 | ||
|
|
e37c92ec6d | ||
|
|
a5dd9a0c5d | ||
|
|
7a4a945f13 | ||
|
|
1d18e26eca | ||
|
|
ac4b8df5e4 | ||
|
|
3bc9fb5889 | ||
|
|
632cbb8efa | ||
|
|
789a12aaaf | ||
|
|
ecdbe09e10 | ||
|
|
1dc31c2786 | ||
|
|
32470bf619 | ||
|
|
8b61bfd638 | ||
|
|
8a7a208905 | ||
|
|
0215103e92 | ||
|
|
c2d1be8981 | ||
|
|
4951c9f821 | ||
|
|
726adc43ec | ||
|
|
3c6ae8b59e | ||
|
|
605be3f7f8 | ||
|
|
c51bc70e0f | ||
|
|
e89d7e3029 | ||
|
|
3c07a729a6 | ||
|
|
84c0ed50a5 | ||
|
|
02c126a7c2 | ||
|
|
114ed20e64 | ||
|
|
4b0f45f667 | ||
|
|
36068ae019 | ||
|
|
9d681c2bb3 | ||
|
|
3af1fac7b0 | ||
|
|
761ee0d827 | ||
|
|
fb8bc3f818 | ||
|
|
826a7da808 | ||
|
|
cbd55ade68 | ||
|
|
5705ee6ef8 | ||
|
|
3f5c6d0c1b | ||
|
|
bfed4813b2 | ||
|
|
e58066e244 | ||
|
|
ee48b6a88f | ||
|
|
9ac09ed4de | ||
|
|
22603348aa | ||
|
|
fec73daaa3 | ||
|
|
c6b68648f4 | ||
|
|
1ecb5d1d83 | ||
|
|
dc786d3db5 | ||
|
|
74fe23ec35 | ||
|
|
b0bff54b08 | ||
|
|
1b541d8d6e | ||
|
|
f29ac588dd | ||
|
|
0696667734 | ||
|
|
1793d71db6 | ||
|
|
4211e1941b | ||
|
|
4bdfef5a18 | ||
|
|
8a37f53685 | ||
|
|
4e1ad6e9a8 | ||
|
|
fb10e1aa57 | ||
|
|
3c283a381e | ||
|
|
dac4d5be12 | ||
|
|
530857182d | ||
|
|
9441f77faa | ||
|
|
3cc8b4c327 | ||
|
|
6b19647d57 | ||
|
|
7bd42d0d96 | ||
|
|
c60e8cfaf7 | ||
|
|
7fd002c006 | ||
|
|
db6c50f109 | ||
|
|
aa4789d632 | ||
|
|
ee8de13e14 | ||
|
|
7dde5f6a8d | ||
|
|
736f003f2e | ||
|
|
47af21e8f1 | ||
|
|
605cbef653 | ||
|
|
388ad0c05c | ||
|
|
2ebbb6f1f7 | ||
|
|
d54f1c7477 | ||
|
|
b78f5ec4c3 | ||
|
|
9fd3bf04b7 | ||
|
|
e97bb3de83 | ||
|
|
c2daf8dfa4 | ||
|
|
09b718c439 | ||
|
|
c177bb3a50 | ||
|
|
977a247a06 | ||
|
|
899a3e2f13 | ||
|
|
8ee4ecb48d | ||
|
|
f7e6f7fa23 | ||
|
|
1f80e360fc | ||
|
|
d7011316d0 | ||
|
|
d3671b344f | ||
|
|
a60cccbf9f | ||
|
|
3e72f5f10e | ||
|
|
b94b78971c | ||
|
|
4d08161ac2 | ||
|
|
8954e48140 | ||
|
|
aa99aa4e85 | ||
|
|
d79febcd06 | ||
|
|
13fc7f3a05 | ||
|
|
14309e1ddc | ||
|
|
5513967926 | ||
|
|
eacd875f3b | ||
|
|
c4fe07c7af | ||
|
|
1186e3f91a | ||
|
|
f354385bf5 | ||
|
|
cabe001590 | ||
|
|
89f691e141 | ||
|
|
4a63291144 | ||
|
|
593b77064c | ||
|
|
9fefc88656 | ||
|
|
eb08081330 | ||
|
|
a3bfddfa5e | ||
|
|
36da48798a | ||
|
|
a0f28f90fa | ||
|
|
851229a01f | ||
|
|
c9c854cea7 | ||
|
|
a38436e889 | ||
|
|
23fc384f2c | ||
|
|
1540119723 | ||
|
|
574f42d79a | ||
|
|
536b0700b0 | ||
|
|
5ba761eb85 | ||
|
|
611ac379bb | ||
|
|
03f32a7ead | ||
|
|
50ea2bb20d | ||
|
|
525daedd5a | ||
|
|
e118031ef8 | ||
|
|
45eedbe58c | ||
|
|
e37c932fca | ||
|
|
5eb778bf4d | ||
|
|
ab9b890b52 | ||
|
|
31c746e5dc | ||
|
|
f01f731107 | ||
|
|
70f0f5a8ca | ||
|
|
cc357c4db8 | ||
|
|
97f4aecfc1 | ||
|
|
2af0f87c8b | ||
|
|
b062d94eef | ||
|
|
6c1b0c0ed2 | ||
|
|
ddcdc684e2 | ||
|
|
eae89f92e6 | ||
|
|
01d115b06b | ||
|
|
79057965a8 | ||
|
|
dcd4d95c8e | ||
|
|
cf61d96df0 | ||
|
|
f8da79f828 | ||
|
|
9750e7d70e | ||
|
|
50aa2bb6b9 | ||
|
|
1d1dd597ed | ||
|
|
cfe5537ee5 | ||
|
|
7869eb3fc4 | ||
|
|
6dfa0602f0 | ||
|
|
f870544302 | ||
|
|
75a40b2251 | ||
|
|
28fb109ed0 | ||
|
|
48607afac5 | ||
|
|
b6ea9ef21a | ||
|
|
b8dd44baa9 | ||
|
|
c4f1fde75b | ||
|
|
667170e2c7 | ||
|
|
53429e6551 | ||
|
|
ac8f97f2b3 | ||
|
|
41c0d2f8cb | ||
|
|
1f3a43dbe6 | ||
|
|
369e195a44 | ||
|
|
15006fedb9 | ||
|
|
e35b23f54d | ||
|
|
f72b0a6032 | ||
|
|
ac9ed061ec | ||
|
|
d919fa3344 | ||
|
|
79913fde35 | ||
|
|
da634d0a8b | ||
|
|
fac54cb426 | ||
|
|
3f19b9b7c1 | ||
|
|
dc48695ab9 | ||
|
|
0a31a35098 | ||
|
|
86f2541695 | ||
|
|
181c4ccaaa | ||
|
|
ed848087d5 | ||
|
|
edd66be5be | ||
|
|
246995dbc8 | ||
|
|
b931fbe5ab | ||
|
|
e014ff015d | ||
|
|
4fa5f40232 | ||
|
|
9b15be97aa | ||
|
|
a7ada46bd9 | ||
|
|
9d16788ad9 | ||
|
|
6ce89aecc3 | ||
|
|
963d0ce7e3 | ||
|
|
0f08d7f851 | ||
|
|
44c514eb9c | ||
|
|
513cbdda93 | ||
|
|
e1ba152352 | ||
|
|
446e764500 | ||
|
|
901d00caa6 | ||
|
|
094790d2c9 | ||
|
|
1c0163a5cc | ||
|
|
8fa7e5817a | ||
|
|
01b89d5682 | ||
|
|
9f01c1a803 | ||
|
|
46f0f50016 | ||
|
|
b8070dbbd7 | ||
|
|
3b16d803c9 | ||
|
|
de195c23a6 | ||
|
|
d3b8908886 | ||
|
|
2688176c77 | ||
|
|
a5839317aa | ||
|
|
a0aab26a41 | ||
|
|
27713812a0 | ||
|
|
cf2c5fda4f | ||
|
|
a9684c0dbf | ||
|
|
c0bf5e1c4d | ||
|
|
a31e3e7dcb | ||
|
|
17b41a3337 | ||
|
|
89a683ae74 | ||
|
|
008661069b | ||
|
|
9296e92e1c | ||
|
|
a34af8d066 | ||
|
|
8726e04629 | ||
|
|
2a01c940ec | ||
|
|
4eab60cbd2 | ||
|
|
a0e060ac1e | ||
|
|
397a8ea96e | ||
|
|
15830339ef | ||
|
|
b29280285e | ||
|
|
1633491bff | ||
|
|
2b0fa1f7dd | ||
|
|
02b386f80a | ||
|
|
bf20b9c540 | ||
|
|
06a12933f3 | ||
|
|
6dd94d3a79 | ||
|
|
f2f89c762a | ||
|
|
e6c2d9ad29 | ||
|
|
83423254cc | ||
|
|
1c20ddc966 | ||
|
|
675e9f22ea | ||
|
|
77c6fb5b24 | ||
|
|
082a0140ef | ||
|
|
9e535ce055 | ||
|
|
d76dea001b | ||
|
|
af0f9b0e95 | ||
|
|
e2082ea942 | ||
|
|
68923e52a3 | ||
|
|
9281f6d253 | ||
|
|
4647845679 | ||
|
|
cf9cf7dd04 | ||
|
|
1316b54956 | ||
|
|
cbc1fadd6f | ||
|
|
4dd09c9add | ||
|
|
267dc07e6b | ||
|
|
d7b4d5dd50 | ||
|
|
7f220b2fac | ||
|
|
275c0423aa | ||
|
|
d3ee4bbc5a | ||
|
|
85a064861f | ||
|
|
d0b436bff2 | ||
|
|
92b2f18072 | ||
|
|
dfc4eca21f | ||
|
|
fc7ae675e2 | ||
|
|
804ad79985 | ||
|
|
da839880e9 | ||
|
|
e9d33454b5 | ||
|
|
d80891efc4 | ||
|
|
37c1e4025c | ||
|
|
59a83d3e5b | ||
|
|
13af92fdc4 | ||
|
|
0c20ee7d4b | ||
|
|
89d42c2c75 | ||
|
|
04611765a4 | ||
|
|
9dfc4fa1a1 | ||
|
|
43232d5c14 | ||
|
|
f7c272d4fa | ||
|
|
ede21449c8 | ||
|
|
bb8e553662 | ||
|
|
f5f4a27a96 | ||
|
|
d7c9a3e976 | ||
|
|
35eb649e9d | ||
|
|
e56a4c9e9b | ||
|
|
95506e37af | ||
|
|
e41840c522 | ||
|
|
2a46a27e6c | ||
|
|
0bcdc27653 | ||
|
|
ddf0f74de7 | ||
|
|
91b21b2334 | ||
|
|
66e568de3b | ||
|
|
f5ca97e393 | ||
|
|
8d06a62485 | ||
|
|
93f9420993 | ||
|
|
5b61070c70 | ||
|
|
dbe1a93526 | ||
|
|
aa5d9a79d6 | ||
|
|
86511ea417 | ||
|
|
1866432db7 | ||
|
|
cf2ac6df68 | ||
|
|
33f1f81b8b | ||
|
|
9d0b581fea | ||
|
|
c05724cb18 | ||
|
|
f0714c9f86 | ||
|
|
cf386750c9 | ||
|
|
54f428f645 | ||
|
|
dc2bd20e55 | ||
|
|
c608ee491f | ||
|
|
0130afb76e | ||
|
|
738b926322 | ||
|
|
bea41c7f3f | ||
|
|
1bbe660dfa | ||
|
|
c4bd188da4 | ||
|
|
5acfa126c8 | ||
|
|
67134eaba1 | ||
|
|
5414623791 | ||
|
|
c93d53f5e3 | ||
|
|
507683780e | ||
|
|
e8b9ee5e08 | ||
|
|
d16154d163 | ||
|
|
c342041fba | ||
|
|
bf42a9906d | ||
|
|
9603e8a7d9 | ||
|
|
c7c040b825 | ||
|
|
ac0474f89d | ||
|
|
bb512e57dc | ||
|
|
db652ea186 | ||
|
|
5a9cc19972 | ||
|
|
1a5fd4eebc | ||
|
|
8a1b49ff19 | ||
|
|
b971abe897 | ||
|
|
43b925ce74 | ||
|
|
62b742ece3 | ||
|
|
d16ef949ca | ||
|
|
23e7cba87f | ||
|
|
a8e6f30d8e | ||
|
|
9c49410898 | ||
|
|
802d74aa6b | ||
|
|
71f9e49e67 | ||
|
|
82ea1051b5 | ||
|
|
6c4d20cd6f | ||
|
|
04c27802c0 | ||
|
|
c3b7202f4f | ||
|
|
81103ef35d | ||
|
|
0eb5c1c62a | ||
|
|
a9de951744 | ||
|
|
a42a1bb09d | ||
|
|
9fbfc9bd4d | ||
|
|
242a998bdc | ||
|
|
9d1bf70234 | ||
|
|
b8c1cc1a51 | ||
|
|
eedd20ef96 | ||
|
|
7c197ad96d | ||
|
|
654fd03c73 | ||
|
|
cee16e0fa3 | ||
|
|
73c471e9ef | ||
|
|
533b99fbf9 | ||
|
|
f39eb98bab | ||
|
|
da77d856a1 | ||
|
|
b2575b38e7 | ||
|
|
0a3cf9ad3d | ||
|
|
00334d0de0 | ||
|
|
226b886ca8 | ||
|
|
bc93bdb5bb | ||
|
|
af214c3a79 | ||
|
|
4eb10f6621 | ||
|
|
7d7d469025 | ||
|
|
fd40bdc0be | ||
|
|
7e0480ae0e | ||
|
|
d80265ccd6 | ||
|
|
1b5a1ae257 | ||
|
|
d8d24a922a | ||
|
|
03339b7b5b | ||
|
|
2028c6e03d | ||
|
|
2988835af5 | ||
|
|
62cca96b72 | ||
|
|
b4dea075a3 | ||
|
|
533f67d3fa | ||
|
|
906e2f0eac | ||
|
|
b8091db6b9 | ||
|
|
381c067755 | ||
|
|
2182ab5187 | ||
|
|
aa5740fb61 | ||
|
|
da92eeae42 | ||
|
|
a9dcf4a860 | ||
|
|
2da0cad6ae | ||
|
|
af1fa6234e | ||
|
|
2a282a3b5f | ||
|
|
7bb23aeca4 | ||
|
|
de939d89eb | ||
|
|
77c975f536 | ||
|
|
75ab0ebcf5 | ||
|
|
10273d6e08 | ||
|
|
d5552a3477 | ||
|
|
a8b081a052 | ||
|
|
9e96dc8b35 | ||
|
|
4d58b24c15 | ||
|
|
0392ac98d2 | ||
|
|
5e3915cbe3 | ||
|
|
29b809de68 | ||
|
|
8f73e89ca0 | ||
|
|
0d0d5d3717 | ||
|
|
14835de9fb | ||
|
|
8a1a26ce4c | ||
|
|
5bf3276e8d | ||
|
|
93dfcb9357 | ||
|
|
0c8662d2b6 | ||
|
|
d84f1d14b5 | ||
|
|
4da31bd566 | ||
|
|
423d2be5f8 | ||
|
|
453a1617aa | ||
|
|
b9258c6178 | ||
|
|
6800d3372f | ||
|
|
a650110ba7 | ||
|
|
54b31d149e | ||
|
|
a745475808 | ||
|
|
233c1c0e76 | ||
|
|
f11554092b |
58
.github/ISSUE_TEMPLATE.md
vendored
Normal file
58
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
## Please follow the guide below
|
||||||
|
|
||||||
|
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
|
||||||
|
- Put an `x` into all the boxes [ ] relevant to your *issue* (like that [x])
|
||||||
|
- Use *Preview* tab to see how your issue will actually look like
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.05.30.2*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
||||||
|
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.05.30.2**
|
||||||
|
|
||||||
|
### Before submitting an *issue* make sure you have:
|
||||||
|
- [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
||||||
|
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
|
||||||
|
|
||||||
|
### What is the purpose of your *issue*?
|
||||||
|
- [ ] Bug report (encountered problems with youtube-dl)
|
||||||
|
- [ ] Site support request (request for adding support for a new site)
|
||||||
|
- [ ] Feature request (request for a new functionality)
|
||||||
|
- [ ] Question
|
||||||
|
- [ ] Other
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
|
||||||
|
|
||||||
|
Add `-v` flag to **your command line** you run youtube-dl with, copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
|
||||||
|
```
|
||||||
|
$ youtube-dl -v <your command line>
|
||||||
|
[debug] System config: []
|
||||||
|
[debug] User config: []
|
||||||
|
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
|
[debug] youtube-dl version 2016.05.30.2
|
||||||
|
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||||
|
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
...
|
||||||
|
<end of log>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):
|
||||||
|
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
|
||||||
|
- Single video: https://youtu.be/BaW_jenozKc
|
||||||
|
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Description of your *issue*, suggested solution and other information
|
||||||
|
|
||||||
|
Explanation of your *issue* in arbitrary form goes here. Please make sure the [description is worded well enough to be understood](https://github.com/rg3/youtube-dl#is-the-description-of-the-issue-itself-sufficient). Provide as much context and examples as possible.
|
||||||
|
If work on your *issue* required an account credentials please provide them or explain how one can obtain them.
|
||||||
58
.github/ISSUE_TEMPLATE_tmpl.md
vendored
Normal file
58
.github/ISSUE_TEMPLATE_tmpl.md
vendored
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
## Please follow the guide below
|
||||||
|
|
||||||
|
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
|
||||||
|
- Put an `x` into all the boxes [ ] relevant to your *issue* (like that [x])
|
||||||
|
- Use *Preview* tab to see how your issue will actually look like
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *%(version)s*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
||||||
|
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **%(version)s**
|
||||||
|
|
||||||
|
### Before submitting an *issue* make sure you have:
|
||||||
|
- [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
||||||
|
- [ ] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
|
||||||
|
|
||||||
|
### What is the purpose of your *issue*?
|
||||||
|
- [ ] Bug report (encountered problems with youtube-dl)
|
||||||
|
- [ ] Site support request (request for adding support for a new site)
|
||||||
|
- [ ] Feature request (request for a new functionality)
|
||||||
|
- [ ] Question
|
||||||
|
- [ ] Other
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
|
||||||
|
|
||||||
|
Add `-v` flag to **your command line** you run youtube-dl with, copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
|
||||||
|
```
|
||||||
|
$ youtube-dl -v <your command line>
|
||||||
|
[debug] System config: []
|
||||||
|
[debug] User config: []
|
||||||
|
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
|
[debug] youtube-dl version %(version)s
|
||||||
|
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||||
|
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
...
|
||||||
|
<end of log>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):
|
||||||
|
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
|
||||||
|
- Single video: https://youtu.be/BaW_jenozKc
|
||||||
|
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Description of your *issue*, suggested solution and other information
|
||||||
|
|
||||||
|
Explanation of your *issue* in arbitrary form goes here. Please make sure the [description is worded well enough to be understood](https://github.com/rg3/youtube-dl#is-the-description-of-the-issue-itself-sufficient). Provide as much context and examples as possible.
|
||||||
|
If work on your *issue* required an account credentials please provide them or explain how one can obtain them.
|
||||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -1,5 +1,6 @@
|
|||||||
*.pyc
|
*.pyc
|
||||||
*.pyo
|
*.pyo
|
||||||
|
*.class
|
||||||
*~
|
*~
|
||||||
*.DS_Store
|
*.DS_Store
|
||||||
wine-py2exe/
|
wine-py2exe/
|
||||||
@@ -12,6 +13,7 @@ README.txt
|
|||||||
youtube-dl.1
|
youtube-dl.1
|
||||||
youtube-dl.bash-completion
|
youtube-dl.bash-completion
|
||||||
youtube-dl.fish
|
youtube-dl.fish
|
||||||
|
youtube_dl/extractor/lazy_extractors.py
|
||||||
youtube-dl
|
youtube-dl
|
||||||
youtube-dl.exe
|
youtube-dl.exe
|
||||||
youtube-dl.tar.gz
|
youtube-dl.tar.gz
|
||||||
@@ -26,10 +28,16 @@ updates_key.pem
|
|||||||
*.mp4
|
*.mp4
|
||||||
*.m4a
|
*.m4a
|
||||||
*.m4v
|
*.m4v
|
||||||
|
*.mp3
|
||||||
*.part
|
*.part
|
||||||
*.swp
|
*.swp
|
||||||
test/testdata
|
test/testdata
|
||||||
|
test/local_parameters.json
|
||||||
.tox
|
.tox
|
||||||
youtube-dl.zsh
|
youtube-dl.zsh
|
||||||
|
|
||||||
|
# IntelliJ related files
|
||||||
.idea
|
.idea
|
||||||
.idea/*
|
*.iml
|
||||||
|
|
||||||
|
tmp/
|
||||||
|
|||||||
@@ -5,14 +5,15 @@ python:
|
|||||||
- "3.2"
|
- "3.2"
|
||||||
- "3.3"
|
- "3.3"
|
||||||
- "3.4"
|
- "3.4"
|
||||||
before_install:
|
- "3.5"
|
||||||
- sudo apt-get update -qq
|
sudo: false
|
||||||
- sudo apt-get install -yqq rtmpdump
|
install:
|
||||||
|
- bash ./devscripts/install_srelay.sh
|
||||||
|
- export PATH=$PATH:$(pwd)/tmp/srelay-0.4.8b6
|
||||||
script: nosetests test --verbose
|
script: nosetests test --verbose
|
||||||
notifications:
|
notifications:
|
||||||
email:
|
email:
|
||||||
- filippo.valsorda@gmail.com
|
- filippo.valsorda@gmail.com
|
||||||
- phihag@phihag.de
|
|
||||||
- yasoob.khld@gmail.com
|
- yasoob.khld@gmail.com
|
||||||
# irc:
|
# irc:
|
||||||
# channels:
|
# channels:
|
||||||
|
|||||||
45
AUTHORS
45
AUTHORS
@@ -128,3 +128,48 @@ Ping O.
|
|||||||
Mister Hat
|
Mister Hat
|
||||||
Peter Ding
|
Peter Ding
|
||||||
jackyzy823
|
jackyzy823
|
||||||
|
George Brighton
|
||||||
|
Remita Amine
|
||||||
|
Aurélio A. Heckert
|
||||||
|
Bernhard Minks
|
||||||
|
sceext
|
||||||
|
Zach Bruggeman
|
||||||
|
Tjark Saul
|
||||||
|
slangangular
|
||||||
|
Behrouz Abbasi
|
||||||
|
ngld
|
||||||
|
nyuszika7h
|
||||||
|
Shaun Walbridge
|
||||||
|
Lee Jenkins
|
||||||
|
Anssi Hannula
|
||||||
|
Lukáš Lalinský
|
||||||
|
Qijiang Fan
|
||||||
|
Rémy Léone
|
||||||
|
Marco Ferragina
|
||||||
|
reiv
|
||||||
|
Muratcan Simsek
|
||||||
|
Evan Lu
|
||||||
|
flatgreen
|
||||||
|
Brian Foley
|
||||||
|
Vignesh Venkat
|
||||||
|
Tom Gijselinck
|
||||||
|
Founder Fang
|
||||||
|
Andrew Alexeyew
|
||||||
|
Saso Bezlaj
|
||||||
|
Erwin de Haan
|
||||||
|
Jens Wille
|
||||||
|
Robin Houtevelts
|
||||||
|
Patrick Griffis
|
||||||
|
Aidan Rowe
|
||||||
|
mutantmonkey
|
||||||
|
Ben Congdon
|
||||||
|
Kacper Michajłow
|
||||||
|
José Joaquín Atria
|
||||||
|
Viťas Strádal
|
||||||
|
Kagami Hiiragi
|
||||||
|
Philip Huppert
|
||||||
|
blahgeek
|
||||||
|
Kevin Deldycke
|
||||||
|
inondle
|
||||||
|
Tomáš Čech
|
||||||
|
Déstin Reed
|
||||||
|
|||||||
@@ -1,6 +1,20 @@
|
|||||||
**Please include the full output of youtube-dl when run with `-v`**.
|
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||||
|
```
|
||||||
|
$ youtube-dl -v <your command line>
|
||||||
|
[debug] System config: []
|
||||||
|
[debug] User config: []
|
||||||
|
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
|
[debug] youtube-dl version 2015.12.06
|
||||||
|
[debug] Git HEAD: 135392e
|
||||||
|
[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
|
||||||
|
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
...
|
||||||
|
```
|
||||||
|
**Do not post screenshots of verbose log only plain text is acceptable.**
|
||||||
|
|
||||||
The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
||||||
|
|
||||||
Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
|
Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
|
||||||
|
|
||||||
@@ -14,21 +28,21 @@ So please elaborate on what feature you are requesting, or what bug you want to
|
|||||||
- How it could be fixed
|
- How it could be fixed
|
||||||
- How your proposed solution would look like
|
- How your proposed solution would look like
|
||||||
|
|
||||||
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a commiter myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
|
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
|
||||||
|
|
||||||
For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
|
For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
|
||||||
|
|
||||||
If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
||||||
|
|
||||||
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
|
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
|
||||||
|
|
||||||
### Are you using the latest version?
|
### Are you using the latest version?
|
||||||
|
|
||||||
Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
|
Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
|
||||||
|
|
||||||
### Is the issue already documented?
|
### Is the issue already documented?
|
||||||
|
|
||||||
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
|
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/rg3/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
|
||||||
|
|
||||||
### Why are existing options not enough?
|
### Why are existing options not enough?
|
||||||
|
|
||||||
@@ -71,14 +85,16 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
|||||||
If you want to create a build of youtube-dl yourself, you'll need
|
If you want to create a build of youtube-dl yourself, you'll need
|
||||||
|
|
||||||
* python
|
* python
|
||||||
* make
|
* make (both GNU make and BSD make are supported)
|
||||||
* pandoc
|
* pandoc
|
||||||
* zip
|
* zip
|
||||||
* nosetests
|
* nosetests
|
||||||
|
|
||||||
### Adding support for a new site
|
### Adding support for a new site
|
||||||
|
|
||||||
If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
|
If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites thus pull requests adding support for them **will be rejected**.
|
||||||
|
|
||||||
|
After you have ensured this site is distributing it's content legally, you can follow this quick list (assuming your service is called `yourextractor`):
|
||||||
|
|
||||||
1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
|
1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
|
||||||
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
|
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
|
||||||
@@ -114,27 +130,29 @@ If you want to add support for a new site, you can follow this quick list (assum
|
|||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
# TODO more code goes here, for example ...
|
# TODO more code goes here, for example ...
|
||||||
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
|
title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': self._og_search_description(webpage),
|
'description': self._og_search_description(webpage),
|
||||||
|
'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
|
||||||
# TODO more properties (see youtube_dl/extractor/common.py)
|
# TODO more properties (see youtube_dl/extractor/common.py)
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
|
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
||||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
||||||
7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
|
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
|
||||||
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
|
||||||
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
||||||
|
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||||
|
|
||||||
$ git add youtube_dl/extractor/__init__.py
|
$ git add youtube_dl/extractor/extractors.py
|
||||||
$ git add youtube_dl/extractor/yourextractor.py
|
$ git add youtube_dl/extractor/yourextractor.py
|
||||||
$ git commit -m '[yourextractor] Add new extractor'
|
$ git commit -m '[yourextractor] Add new extractor'
|
||||||
$ git push origin yourextractor
|
$ git push origin yourextractor
|
||||||
|
|
||||||
10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
||||||
|
|
||||||
In any case, thank you very much for your contributions!
|
In any case, thank you very much for your contributions!
|
||||||
|
|
||||||
|
|||||||
38
Makefile
38
Makefile
@@ -1,8 +1,9 @@
|
|||||||
all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
|
all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
|
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish youtube_dl/extractor/lazy_extractors.py *.dump *.part *.info.json *.mp4 *.m4a *.flv *.mp3 *.avi *.mkv *.webm *.jpg *.png CONTRIBUTING.md.tmp ISSUE_TEMPLATE.md.tmp youtube-dl youtube-dl.exe
|
||||||
find . -name "*.pyc" -delete
|
find . -name "*.pyc" -delete
|
||||||
|
find . -name "*.class" -delete
|
||||||
|
|
||||||
PREFIX ?= /usr/local
|
PREFIX ?= /usr/local
|
||||||
BINDIR ?= $(PREFIX)/bin
|
BINDIR ?= $(PREFIX)/bin
|
||||||
@@ -11,15 +12,7 @@ SHAREDIR ?= $(PREFIX)/share
|
|||||||
PYTHON ?= /usr/bin/env python
|
PYTHON ?= /usr/bin/env python
|
||||||
|
|
||||||
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
||||||
ifeq ($(PREFIX),/usr)
|
SYSCONFDIR != if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then echo /etc; else echo $(PREFIX)/etc; fi
|
||||||
SYSCONFDIR=/etc
|
|
||||||
else
|
|
||||||
ifeq ($(PREFIX),/usr/local)
|
|
||||||
SYSCONFDIR=/etc
|
|
||||||
else
|
|
||||||
SYSCONFDIR=$(PREFIX)/etc
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
install -d $(DESTDIR)$(BINDIR)
|
install -d $(DESTDIR)$(BINDIR)
|
||||||
@@ -44,7 +37,7 @@ test:
|
|||||||
ot: offlinetest
|
ot: offlinetest
|
||||||
|
|
||||||
offlinetest: codetest
|
offlinetest: codetest
|
||||||
nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py
|
$(PYTHON) -m nose --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py --exclude test_socks.py
|
||||||
|
|
||||||
tar: youtube-dl.tar.gz
|
tar: youtube-dl.tar.gz
|
||||||
|
|
||||||
@@ -61,37 +54,46 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
|
|||||||
chmod a+x youtube-dl
|
chmod a+x youtube-dl
|
||||||
|
|
||||||
README.md: youtube_dl/*.py youtube_dl/*/*.py
|
README.md: youtube_dl/*.py youtube_dl/*/*.py
|
||||||
COLUMNS=80 python youtube_dl/__main__.py --help | python devscripts/make_readme.py
|
COLUMNS=80 $(PYTHON) youtube_dl/__main__.py --help | $(PYTHON) devscripts/make_readme.py
|
||||||
|
|
||||||
CONTRIBUTING.md: README.md
|
CONTRIBUTING.md: README.md
|
||||||
python devscripts/make_contributing.py README.md CONTRIBUTING.md
|
$(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
|
||||||
|
|
||||||
|
.github/ISSUE_TEMPLATE.md: devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl.md youtube_dl/version.py
|
||||||
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl.md .github/ISSUE_TEMPLATE.md
|
||||||
|
|
||||||
supportedsites:
|
supportedsites:
|
||||||
python devscripts/make_supportedsites.py docs/supportedsites.md
|
$(PYTHON) devscripts/make_supportedsites.py docs/supportedsites.md
|
||||||
|
|
||||||
README.txt: README.md
|
README.txt: README.md
|
||||||
pandoc -f markdown -t plain README.md -o README.txt
|
pandoc -f markdown -t plain README.md -o README.txt
|
||||||
|
|
||||||
youtube-dl.1: README.md
|
youtube-dl.1: README.md
|
||||||
python devscripts/prepare_manpage.py >youtube-dl.1.temp.md
|
$(PYTHON) devscripts/prepare_manpage.py youtube-dl.1.temp.md
|
||||||
pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
|
pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
|
||||||
rm -f youtube-dl.1.temp.md
|
rm -f youtube-dl.1.temp.md
|
||||||
|
|
||||||
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
|
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
|
||||||
python devscripts/bash-completion.py
|
$(PYTHON) devscripts/bash-completion.py
|
||||||
|
|
||||||
bash-completion: youtube-dl.bash-completion
|
bash-completion: youtube-dl.bash-completion
|
||||||
|
|
||||||
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
|
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
|
||||||
python devscripts/zsh-completion.py
|
$(PYTHON) devscripts/zsh-completion.py
|
||||||
|
|
||||||
zsh-completion: youtube-dl.zsh
|
zsh-completion: youtube-dl.zsh
|
||||||
|
|
||||||
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
|
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
|
||||||
python devscripts/fish-completion.py
|
$(PYTHON) devscripts/fish-completion.py
|
||||||
|
|
||||||
fish-completion: youtube-dl.fish
|
fish-completion: youtube-dl.fish
|
||||||
|
|
||||||
|
lazy-extractors: youtube_dl/extractor/lazy_extractors.py
|
||||||
|
|
||||||
|
_EXTRACTOR_FILES != find youtube_dl/extractor -iname '*.py' -and -not -iname 'lazy_extractors.py'
|
||||||
|
youtube_dl/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
||||||
|
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
||||||
|
|
||||||
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import os
|
|||||||
from os.path import dirname as dirn
|
from os.path import dirname as dirn
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
|
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||||
import youtube_dl
|
import youtube_dl
|
||||||
|
|
||||||
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
|
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
|
||||||
|
|||||||
@@ -1,17 +1,42 @@
|
|||||||
#!/usr/bin/python3
|
#!/usr/bin/python3
|
||||||
|
|
||||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
|
||||||
from socketserver import ThreadingMixIn
|
|
||||||
import argparse
|
import argparse
|
||||||
import ctypes
|
import ctypes
|
||||||
import functools
|
import functools
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
import tempfile
|
||||||
import threading
|
import threading
|
||||||
import traceback
|
import traceback
|
||||||
import os.path
|
import os.path
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname((os.path.abspath(__file__)))))
|
||||||
|
from youtube_dl.compat import (
|
||||||
|
compat_http_server,
|
||||||
|
compat_str,
|
||||||
|
compat_urlparse,
|
||||||
|
)
|
||||||
|
|
||||||
class BuildHTTPServer(ThreadingMixIn, HTTPServer):
|
# These are not used outside of buildserver.py thus not in compat.py
|
||||||
|
|
||||||
|
try:
|
||||||
|
import winreg as compat_winreg
|
||||||
|
except ImportError: # Python 2
|
||||||
|
import _winreg as compat_winreg
|
||||||
|
|
||||||
|
try:
|
||||||
|
import socketserver as compat_socketserver
|
||||||
|
except ImportError: # Python 2
|
||||||
|
import SocketServer as compat_socketserver
|
||||||
|
|
||||||
|
try:
|
||||||
|
compat_input = raw_input
|
||||||
|
except NameError: # Python 3
|
||||||
|
compat_input = input
|
||||||
|
|
||||||
|
|
||||||
|
class BuildHTTPServer(compat_socketserver.ThreadingMixIn, compat_http_server.HTTPServer):
|
||||||
allow_reuse_address = True
|
allow_reuse_address = True
|
||||||
|
|
||||||
|
|
||||||
@@ -191,7 +216,7 @@ def main(args=None):
|
|||||||
action='store_const', dest='action', const='service',
|
action='store_const', dest='action', const='service',
|
||||||
help='Run as a Windows service')
|
help='Run as a Windows service')
|
||||||
parser.add_argument('-b', '--bind', metavar='<host:port>',
|
parser.add_argument('-b', '--bind', metavar='<host:port>',
|
||||||
action='store', default='localhost:8142',
|
action='store', default='0.0.0.0:8142',
|
||||||
help='Bind to host:port (default %default)')
|
help='Bind to host:port (default %default)')
|
||||||
options = parser.parse_args(args=args)
|
options = parser.parse_args(args=args)
|
||||||
|
|
||||||
@@ -216,7 +241,7 @@ def main(args=None):
|
|||||||
srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
|
srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
|
||||||
thr = threading.Thread(target=srv.serve_forever)
|
thr = threading.Thread(target=srv.serve_forever)
|
||||||
thr.start()
|
thr.start()
|
||||||
input('Press ENTER to shut down')
|
compat_input('Press ENTER to shut down')
|
||||||
srv.shutdown()
|
srv.shutdown()
|
||||||
thr.join()
|
thr.join()
|
||||||
|
|
||||||
@@ -231,8 +256,6 @@ def rmtree(path):
|
|||||||
os.remove(fname)
|
os.remove(fname)
|
||||||
os.rmdir(path)
|
os.rmdir(path)
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
|
|
||||||
class BuildError(Exception):
|
class BuildError(Exception):
|
||||||
def __init__(self, output, code=500):
|
def __init__(self, output, code=500):
|
||||||
@@ -249,15 +272,25 @@ class HTTPError(BuildError):
|
|||||||
|
|
||||||
class PythonBuilder(object):
|
class PythonBuilder(object):
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
pythonVersion = kwargs.pop('python', '2.7')
|
python_version = kwargs.pop('python', '3.4')
|
||||||
try:
|
python_path = None
|
||||||
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Python\PythonCore\%s\InstallPath' % pythonVersion)
|
for node in ('Wow6432Node\\', ''):
|
||||||
try:
|
try:
|
||||||
self.pythonPath, _ = _winreg.QueryValueEx(key, '')
|
key = compat_winreg.OpenKey(
|
||||||
finally:
|
compat_winreg.HKEY_LOCAL_MACHINE,
|
||||||
_winreg.CloseKey(key)
|
r'SOFTWARE\%sPython\PythonCore\%s\InstallPath' % (node, python_version))
|
||||||
except Exception:
|
try:
|
||||||
raise BuildError('No such Python version: %s' % pythonVersion)
|
python_path, _ = compat_winreg.QueryValueEx(key, '')
|
||||||
|
finally:
|
||||||
|
compat_winreg.CloseKey(key)
|
||||||
|
break
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if not python_path:
|
||||||
|
raise BuildError('No such Python version: %s' % python_version)
|
||||||
|
|
||||||
|
self.pythonPath = python_path
|
||||||
|
|
||||||
super(PythonBuilder, self).__init__(**kwargs)
|
super(PythonBuilder, self).__init__(**kwargs)
|
||||||
|
|
||||||
@@ -305,8 +338,10 @@ class YoutubeDLBuilder(object):
|
|||||||
|
|
||||||
def build(self):
|
def build(self):
|
||||||
try:
|
try:
|
||||||
subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
|
proc = subprocess.Popen([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'], stdin=subprocess.PIPE, cwd=self.buildPath)
|
||||||
cwd=self.buildPath)
|
proc.wait()
|
||||||
|
#subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
|
||||||
|
# cwd=self.buildPath)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
raise BuildError(e.output)
|
raise BuildError(e.output)
|
||||||
|
|
||||||
@@ -369,12 +404,12 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
|
class BuildHTTPRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
||||||
actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
|
actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
|
||||||
|
|
||||||
def do_GET(self):
|
def do_GET(self):
|
||||||
path = urlparse.urlparse(self.path)
|
path = compat_urlparse.urlparse(self.path)
|
||||||
paramDict = dict([(key, value[0]) for key, value in urlparse.parse_qs(path.query).items()])
|
paramDict = dict([(key, value[0]) for key, value in compat_urlparse.parse_qs(path.query).items()])
|
||||||
action, _, path = path.path.strip('/').partition('/')
|
action, _, path = path.path.strip('/').partition('/')
|
||||||
if path:
|
if path:
|
||||||
path = path.split('/')
|
path = path.split('/')
|
||||||
@@ -388,7 +423,7 @@ class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
|
|||||||
builder.close()
|
builder.close()
|
||||||
except BuildError as e:
|
except BuildError as e:
|
||||||
self.send_response(e.code)
|
self.send_response(e.code)
|
||||||
msg = unicode(e).encode('UTF-8')
|
msg = compat_str(e).encode('UTF-8')
|
||||||
self.send_header('Content-Type', 'text/plain; charset=UTF-8')
|
self.send_header('Content-Type', 'text/plain; charset=UTF-8')
|
||||||
self.send_header('Content-Length', len(msg))
|
self.send_header('Content-Length', len(msg))
|
||||||
self.end_headers()
|
self.end_headers()
|
||||||
@@ -400,7 +435,5 @@ class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
|
|||||||
else:
|
else:
|
||||||
self.send_response(500, 'Malformed URL')
|
self.send_response(500, 'Malformed URL')
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
main()
|
main()
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import os
|
|||||||
from os.path import dirname as dirn
|
from os.path import dirname as dirn
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
|
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||||
import youtube_dl
|
import youtube_dl
|
||||||
from youtube_dl.utils import shell_quote
|
from youtube_dl.utils import shell_quote
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import with_statement, unicode_literals
|
|||||||
|
|
||||||
import datetime
|
import datetime
|
||||||
import glob
|
import glob
|
||||||
import io # For Python 2 compatibilty
|
import io # For Python 2 compatibility
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import os
|
|||||||
import textwrap
|
import textwrap
|
||||||
|
|
||||||
# We must be able to import youtube_dl
|
# We must be able to import youtube_dl
|
||||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
||||||
|
|
||||||
import youtube_dl
|
import youtube_dl
|
||||||
|
|
||||||
|
|||||||
8
devscripts/install_srelay.sh
Executable file
8
devscripts/install_srelay.sh
Executable file
@@ -0,0 +1,8 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
mkdir -p tmp && cd tmp
|
||||||
|
wget -N http://downloads.sourceforge.net/project/socks-relay/socks-relay/srelay-0.4.8/srelay-0.4.8b6.tar.gz
|
||||||
|
tar zxvf srelay-0.4.8b6.tar.gz
|
||||||
|
cd srelay-0.4.8b6
|
||||||
|
./configure
|
||||||
|
make
|
||||||
19
devscripts/lazy_load_template.py
Normal file
19
devscripts/lazy_load_template.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# encoding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class LazyLoadExtractor(object):
|
||||||
|
_module = None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def ie_key(cls):
|
||||||
|
return cls.__name__[:-2]
|
||||||
|
|
||||||
|
def __new__(cls, *args, **kwargs):
|
||||||
|
mod = __import__(cls._module, fromlist=(cls.__name__,))
|
||||||
|
real_cls = getattr(mod, cls.__name__)
|
||||||
|
instance = real_cls.__new__(real_cls)
|
||||||
|
instance.__init__(*args, **kwargs)
|
||||||
|
return instance
|
||||||
29
devscripts/make_issue_template.py
Normal file
29
devscripts/make_issue_template.py
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import io
|
||||||
|
import optparse
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
||||||
|
options, args = parser.parse_args()
|
||||||
|
if len(args) != 2:
|
||||||
|
parser.error('Expected an input and an output filename')
|
||||||
|
|
||||||
|
infile, outfile = args
|
||||||
|
|
||||||
|
with io.open(infile, encoding='utf-8') as inf:
|
||||||
|
issue_template_tmpl = inf.read()
|
||||||
|
|
||||||
|
# Get the version from youtube_dl/version.py without importing the package
|
||||||
|
exec(compile(open('youtube_dl/version.py').read(),
|
||||||
|
'youtube_dl/version.py', 'exec'))
|
||||||
|
|
||||||
|
out = issue_template_tmpl % {'version': locals()['__version__']}
|
||||||
|
|
||||||
|
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||||
|
outf.write(out)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
63
devscripts/make_lazy_extractors.py
Normal file
63
devscripts/make_lazy_extractors.py
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
from __future__ import unicode_literals, print_function
|
||||||
|
|
||||||
|
from inspect import getsource
|
||||||
|
import os
|
||||||
|
from os.path import dirname as dirn
|
||||||
|
import sys
|
||||||
|
|
||||||
|
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
|
||||||
|
|
||||||
|
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||||
|
|
||||||
|
lazy_extractors_filename = sys.argv[1]
|
||||||
|
if os.path.exists(lazy_extractors_filename):
|
||||||
|
os.remove(lazy_extractors_filename)
|
||||||
|
|
||||||
|
from youtube_dl.extractor import _ALL_CLASSES
|
||||||
|
from youtube_dl.extractor.common import InfoExtractor
|
||||||
|
|
||||||
|
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
||||||
|
module_template = f.read()
|
||||||
|
|
||||||
|
module_contents = [module_template + '\n' + getsource(InfoExtractor.suitable)]
|
||||||
|
|
||||||
|
ie_template = '''
|
||||||
|
class {name}(LazyLoadExtractor):
|
||||||
|
_VALID_URL = {valid_url!r}
|
||||||
|
_module = '{module}'
|
||||||
|
'''
|
||||||
|
|
||||||
|
make_valid_template = '''
|
||||||
|
@classmethod
|
||||||
|
def _make_valid_url(cls):
|
||||||
|
return {valid_url!r}
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
def build_lazy_ie(ie, name):
|
||||||
|
valid_url = getattr(ie, '_VALID_URL', None)
|
||||||
|
s = ie_template.format(
|
||||||
|
name=name,
|
||||||
|
valid_url=valid_url,
|
||||||
|
module=ie.__module__)
|
||||||
|
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
|
||||||
|
s += '\n' + getsource(ie.suitable)
|
||||||
|
if hasattr(ie, '_make_valid_url'):
|
||||||
|
# search extractors
|
||||||
|
s += make_valid_template.format(valid_url=ie._make_valid_url())
|
||||||
|
return s
|
||||||
|
|
||||||
|
names = []
|
||||||
|
for ie in list(sorted(_ALL_CLASSES[:-1], key=lambda cls: cls.ie_key())) + _ALL_CLASSES[-1:]:
|
||||||
|
name = ie.ie_key() + 'IE'
|
||||||
|
src = build_lazy_ie(ie, name)
|
||||||
|
module_contents.append(src)
|
||||||
|
names.append(name)
|
||||||
|
|
||||||
|
module_contents.append(
|
||||||
|
'_ALL_CLASSES = [{0}]'.format(', '.join(names)))
|
||||||
|
|
||||||
|
module_src = '\n'.join(module_contents) + '\n'
|
||||||
|
|
||||||
|
with open(lazy_extractors_filename, 'wt') as f:
|
||||||
|
f.write(module_src)
|
||||||
@@ -9,7 +9,7 @@ import sys
|
|||||||
|
|
||||||
# Import youtube_dl
|
# Import youtube_dl
|
||||||
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
|
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
|
||||||
sys.path.append(ROOT_DIR)
|
sys.path.insert(0, ROOT_DIR)
|
||||||
import youtube_dl
|
import youtube_dl
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,16 +1,13 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import io
|
import io
|
||||||
|
import optparse
|
||||||
import os.path
|
import os.path
|
||||||
import sys
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
README_FILE = os.path.join(ROOT_DIR, 'README.md')
|
README_FILE = os.path.join(ROOT_DIR, 'README.md')
|
||||||
|
|
||||||
with io.open(README_FILE, encoding='utf-8') as f:
|
|
||||||
readme = f.read()
|
|
||||||
|
|
||||||
PREFIX = '''%YOUTUBE-DL(1)
|
PREFIX = '''%YOUTUBE-DL(1)
|
||||||
|
|
||||||
# NAME
|
# NAME
|
||||||
@@ -22,11 +19,56 @@ youtube\-dl \- download videos from youtube.com or other video platforms
|
|||||||
**youtube-dl** \[OPTIONS\] URL [URL...]
|
**youtube-dl** \[OPTIONS\] URL [URL...]
|
||||||
|
|
||||||
'''
|
'''
|
||||||
readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
|
|
||||||
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
|
|
||||||
readme = PREFIX + readme
|
|
||||||
|
|
||||||
if sys.version_info < (3, 0):
|
|
||||||
print(readme.encode('utf-8'))
|
def main():
|
||||||
else:
|
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
|
||||||
print(readme)
|
options, args = parser.parse_args()
|
||||||
|
if len(args) != 1:
|
||||||
|
parser.error('Expected an output filename')
|
||||||
|
|
||||||
|
outfile, = args
|
||||||
|
|
||||||
|
with io.open(README_FILE, encoding='utf-8') as f:
|
||||||
|
readme = f.read()
|
||||||
|
|
||||||
|
readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
|
||||||
|
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
|
||||||
|
readme = PREFIX + readme
|
||||||
|
|
||||||
|
readme = filter_options(readme)
|
||||||
|
|
||||||
|
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||||
|
outf.write(readme)
|
||||||
|
|
||||||
|
|
||||||
|
def filter_options(readme):
|
||||||
|
ret = ''
|
||||||
|
in_options = False
|
||||||
|
for line in readme.split('\n'):
|
||||||
|
if line.startswith('# '):
|
||||||
|
if line[2:].startswith('OPTIONS'):
|
||||||
|
in_options = True
|
||||||
|
else:
|
||||||
|
in_options = False
|
||||||
|
|
||||||
|
if in_options:
|
||||||
|
if line.lstrip().startswith('-'):
|
||||||
|
option, description = re.split(r'\s{2,}', line.lstrip())
|
||||||
|
split_option = option.split(' ')
|
||||||
|
|
||||||
|
if not split_option[-1].startswith('-'): # metavar
|
||||||
|
option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])
|
||||||
|
|
||||||
|
# Pandoc's definition_lists. See http://pandoc.org/README.html
|
||||||
|
# for more information.
|
||||||
|
ret += '\n%s\n: %s\n' % (option, description)
|
||||||
|
else:
|
||||||
|
ret += line.lstrip() + '\n'
|
||||||
|
else:
|
||||||
|
ret += line + '\n'
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
# * the git config user.signingkey is properly set
|
# * the git config user.signingkey is properly set
|
||||||
|
|
||||||
# You will need
|
# You will need
|
||||||
# pip install coverage nose rsa
|
# pip install coverage nose rsa wheel
|
||||||
|
|
||||||
# TODO
|
# TODO
|
||||||
# release notes
|
# release notes
|
||||||
@@ -15,10 +15,28 @@
|
|||||||
set -e
|
set -e
|
||||||
|
|
||||||
skip_tests=true
|
skip_tests=true
|
||||||
if [ "$1" = '--run-tests' ]; then
|
buildserver='localhost:8142'
|
||||||
skip_tests=false
|
|
||||||
shift
|
while true
|
||||||
fi
|
do
|
||||||
|
case "$1" in
|
||||||
|
--run-tests)
|
||||||
|
skip_tests=false
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--buildserver)
|
||||||
|
buildserver="$2"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--*)
|
||||||
|
echo "ERROR: unknown option $1"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
break
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
|
if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
|
||||||
version="$1"
|
version="$1"
|
||||||
@@ -33,6 +51,9 @@ if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: th
|
|||||||
useless_files=$(find youtube_dl -type f -not -name '*.py')
|
useless_files=$(find youtube_dl -type f -not -name '*.py')
|
||||||
if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
|
if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
|
||||||
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
|
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
|
||||||
|
if ! type pandoc >/dev/null 2>/dev/null; then echo 'ERROR: pandoc is missing'; exit 1; fi
|
||||||
|
if ! python3 -c 'import rsa' 2>/dev/null; then echo 'ERROR: python3-rsa is missing'; exit 1; fi
|
||||||
|
if ! python3 -c 'import wheel' 2>/dev/null; then echo 'ERROR: wheel is missing'; exit 1; fi
|
||||||
|
|
||||||
/bin/echo -e "\n### First of all, testing..."
|
/bin/echo -e "\n### First of all, testing..."
|
||||||
make clean
|
make clean
|
||||||
@@ -45,9 +66,9 @@ fi
|
|||||||
/bin/echo -e "\n### Changing version in version.py..."
|
/bin/echo -e "\n### Changing version in version.py..."
|
||||||
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
|
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
|
||||||
|
|
||||||
/bin/echo -e "\n### Committing documentation and youtube_dl/version.py..."
|
/bin/echo -e "\n### Committing documentation, templates and youtube_dl/version.py..."
|
||||||
make README.md CONTRIBUTING.md supportedsites
|
make README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md supportedsites
|
||||||
git add README.md CONTRIBUTING.md docs/supportedsites.md youtube_dl/version.py
|
git add README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md docs/supportedsites.md youtube_dl/version.py
|
||||||
git commit -m "release $version"
|
git commit -m "release $version"
|
||||||
|
|
||||||
/bin/echo -e "\n### Now tagging, signing and pushing..."
|
/bin/echo -e "\n### Now tagging, signing and pushing..."
|
||||||
@@ -64,7 +85,7 @@ git push origin "$version"
|
|||||||
REV=$(git rev-parse HEAD)
|
REV=$(git rev-parse HEAD)
|
||||||
make youtube-dl youtube-dl.tar.gz
|
make youtube-dl youtube-dl.tar.gz
|
||||||
read -p "VM running? (y/n) " -n 1
|
read -p "VM running? (y/n) " -n 1
|
||||||
wget "http://localhost:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
|
wget "http://$buildserver/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
|
||||||
mkdir -p "build/$version"
|
mkdir -p "build/$version"
|
||||||
mv youtube-dl youtube-dl.exe "build/$version"
|
mv youtube-dl youtube-dl.exe "build/$version"
|
||||||
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
|
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import os
|
|||||||
from os.path import dirname as dirn
|
from os.path import dirname as dirn
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
|
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||||
import youtube_dl
|
import youtube_dl
|
||||||
|
|
||||||
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
|
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
# Supported sites
|
# Supported sites
|
||||||
- **1tv**: Первый канал
|
- **1tv**: Первый канал
|
||||||
- **1up.com**
|
- **1up.com**
|
||||||
|
- **20min**
|
||||||
- **220.ro**
|
- **220.ro**
|
||||||
- **22tracks:genre**
|
- **22tracks:genre**
|
||||||
- **22tracks:track**
|
- **22tracks:track**
|
||||||
- **24video**
|
- **24video**
|
||||||
|
- **3qsdn**: 3Q SDN
|
||||||
- **3sat**
|
- **3sat**
|
||||||
- **4tube**
|
- **4tube**
|
||||||
- **56.com**
|
- **56.com**
|
||||||
@@ -14,123 +16,174 @@
|
|||||||
- **9gag**
|
- **9gag**
|
||||||
- **abc.net.au**
|
- **abc.net.au**
|
||||||
- **Abc7News**
|
- **Abc7News**
|
||||||
|
- **abcnews**
|
||||||
|
- **abcnews:video**
|
||||||
- **AcademicEarth:Course**
|
- **AcademicEarth:Course**
|
||||||
|
- **acast**
|
||||||
|
- **acast:channel**
|
||||||
- **AddAnime**
|
- **AddAnime**
|
||||||
- **AdobeTV**
|
- **AdobeTV**
|
||||||
|
- **AdobeTVChannel**
|
||||||
|
- **AdobeTVShow**
|
||||||
- **AdobeTVVideo**
|
- **AdobeTVVideo**
|
||||||
- **AdultSwim**
|
- **AdultSwim**
|
||||||
- **Aftenposten**
|
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network
|
||||||
- **Aftonbladet**
|
- **Aftonbladet**
|
||||||
- **AirMozilla**
|
- **AirMozilla**
|
||||||
- **AlJazeera**
|
- **AlJazeera**
|
||||||
- **Allocine**
|
- **Allocine**
|
||||||
- **AlphaPorno**
|
- **AlphaPorno**
|
||||||
|
- **AnimeOnDemand**
|
||||||
- **anitube.se**
|
- **anitube.se**
|
||||||
- **AnySex**
|
- **AnySex**
|
||||||
- **Aparat**
|
- **Aparat**
|
||||||
- **AppleDaily**
|
- **AppleConnect**
|
||||||
- **AppleTrailers**
|
- **AppleDaily**: 臺灣蘋果日報
|
||||||
|
- **appletrailers**
|
||||||
|
- **appletrailers:section**
|
||||||
- **archive.org**: archive.org videos
|
- **archive.org**: archive.org videos
|
||||||
- **ARD**
|
- **ARD**
|
||||||
- **ARD:mediathek**
|
- **ARD:mediathek**
|
||||||
|
- **ARD:mediathek**: Saarländischer Rundfunk
|
||||||
- **arte.tv**
|
- **arte.tv**
|
||||||
- **arte.tv:+7**
|
- **arte.tv:+7**
|
||||||
|
- **arte.tv:cinema**
|
||||||
- **arte.tv:concert**
|
- **arte.tv:concert**
|
||||||
- **arte.tv:creative**
|
- **arte.tv:creative**
|
||||||
- **arte.tv:ddc**
|
- **arte.tv:ddc**
|
||||||
- **arte.tv:embed**
|
- **arte.tv:embed**
|
||||||
- **arte.tv:future**
|
- **arte.tv:future**
|
||||||
|
- **arte.tv:info**
|
||||||
|
- **arte.tv:magazine**
|
||||||
- **AtresPlayer**
|
- **AtresPlayer**
|
||||||
- **ATTTechChannel**
|
- **ATTTechChannel**
|
||||||
|
- **AudiMedia**
|
||||||
|
- **AudioBoom**
|
||||||
- **audiomack**
|
- **audiomack**
|
||||||
- **audiomack:album**
|
- **audiomack:album**
|
||||||
|
- **auroravid**: AuroraVid
|
||||||
- **Azubu**
|
- **Azubu**
|
||||||
- **BaiduVideo**
|
- **AzubuLive**
|
||||||
|
- **BaiduVideo**: 百度视频
|
||||||
- **bambuser**
|
- **bambuser**
|
||||||
- **bambuser:channel**
|
- **bambuser:channel**
|
||||||
- **Bandcamp**
|
- **Bandcamp**
|
||||||
- **Bandcamp:album**
|
- **Bandcamp:album**
|
||||||
|
- **bbc**: BBC
|
||||||
- **bbc.co.uk**: BBC iPlayer
|
- **bbc.co.uk**: BBC iPlayer
|
||||||
|
- **bbc.co.uk:article**: BBC articles
|
||||||
- **BeatportPro**
|
- **BeatportPro**
|
||||||
- **Beeg**
|
- **Beeg**
|
||||||
- **BehindKink**
|
- **BehindKink**
|
||||||
- **Bet**
|
- **Bet**
|
||||||
|
- **Bigflix**
|
||||||
- **Bild**: Bild.de
|
- **Bild**: Bild.de
|
||||||
- **BiliBili**
|
- **BiliBili**
|
||||||
|
- **BioBioChileTV**
|
||||||
|
- **BIQLE**
|
||||||
|
- **BleacherReport**
|
||||||
|
- **BleacherReportCMS**
|
||||||
- **blinkx**
|
- **blinkx**
|
||||||
- **blip.tv:user**
|
|
||||||
- **BlipTV**
|
|
||||||
- **Bloomberg**
|
- **Bloomberg**
|
||||||
|
- **BokeCC**
|
||||||
- **Bpb**: Bundeszentrale für politische Bildung
|
- **Bpb**: Bundeszentrale für politische Bildung
|
||||||
- **BR**: Bayerischer Rundfunk Mediathek
|
- **BR**: Bayerischer Rundfunk Mediathek
|
||||||
|
- **BravoTV**
|
||||||
- **Break**
|
- **Break**
|
||||||
- **Brightcove**
|
- **brightcove:legacy**
|
||||||
|
- **brightcove:new**
|
||||||
- **bt:article**: Bergens Tidende Articles
|
- **bt:article**: Bergens Tidende Articles
|
||||||
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
||||||
- **BuzzFeed**
|
- **BuzzFeed**
|
||||||
- **BYUtv**
|
- **BYUtv**
|
||||||
- **Camdemy**
|
- **Camdemy**
|
||||||
- **CamdemyFolder**
|
- **CamdemyFolder**
|
||||||
- **Canal13cl**
|
- **CamWithHer**
|
||||||
- **canalc2.tv**
|
- **canalc2.tv**
|
||||||
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
|
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
|
||||||
|
- **Canvas**
|
||||||
|
- **CBC**
|
||||||
|
- **CBCPlayer**
|
||||||
- **CBS**
|
- **CBS**
|
||||||
|
- **CBSInteractive**
|
||||||
|
- **CBSLocal**
|
||||||
- **CBSNews**: CBS News
|
- **CBSNews**: CBS News
|
||||||
|
- **CBSNewsLiveVideo**: CBS News Live Videos
|
||||||
- **CBSSports**
|
- **CBSSports**
|
||||||
|
- **CDA**
|
||||||
- **CeskaTelevize**
|
- **CeskaTelevize**
|
||||||
- **channel9**: Channel 9
|
- **channel9**: Channel 9
|
||||||
|
- **Chaturbate**
|
||||||
- **Chilloutzone**
|
- **Chilloutzone**
|
||||||
- **chirbit**
|
- **chirbit**
|
||||||
- **chirbit:profile**
|
- **chirbit:profile**
|
||||||
- **Cinchcast**
|
- **Cinchcast**
|
||||||
- **Cinemassacre**
|
- **Clipfish**
|
||||||
- **clipfish**
|
|
||||||
- **cliphunter**
|
- **cliphunter**
|
||||||
|
- **ClipRs**
|
||||||
- **Clipsyndicate**
|
- **Clipsyndicate**
|
||||||
|
- **cloudtime**: CloudTime
|
||||||
- **Cloudy**
|
- **Cloudy**
|
||||||
- **Clubic**
|
- **Clubic**
|
||||||
|
- **Clyp**
|
||||||
- **cmt.com**
|
- **cmt.com**
|
||||||
- **CNET**
|
- **CNBC**
|
||||||
- **CNN**
|
- **CNN**
|
||||||
- **CNNArticle**
|
- **CNNArticle**
|
||||||
- **CNNBlogs**
|
- **CNNBlogs**
|
||||||
- **CollegeHumor**
|
|
||||||
- **CollegeRama**
|
- **CollegeRama**
|
||||||
- **ComCarCoff**
|
- **ComCarCoff**
|
||||||
- **ComedyCentral**
|
- **ComedyCentral**
|
||||||
- **ComedyCentralShows**: The Daily Show / The Colbert Report
|
- **ComedyCentralShows**: The Daily Show / The Colbert Report
|
||||||
- **CondeNast**: Condé Nast media group: Condé Nast, GQ, Glamour, Vanity Fair, Vogue, W Magazine, WIRED
|
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
|
||||||
|
- **Coub**
|
||||||
- **Cracked**
|
- **Cracked**
|
||||||
|
- **Crackle**
|
||||||
- **Criterion**
|
- **Criterion**
|
||||||
- **CrooksAndLiars**
|
- **CrooksAndLiars**
|
||||||
- **Crunchyroll**
|
- **Crunchyroll**
|
||||||
- **crunchyroll:playlist**
|
- **crunchyroll:playlist**
|
||||||
|
- **CSNNE**
|
||||||
- **CSpan**: C-SPAN
|
- **CSpan**: C-SPAN
|
||||||
- **CtsNews**
|
- **CtsNews**: 華視新聞
|
||||||
- **culturebox.francetvinfo.fr**
|
- **culturebox.francetvinfo.fr**
|
||||||
|
- **CultureUnplugged**
|
||||||
|
- **CWTV**
|
||||||
|
- **DailyMail**
|
||||||
- **dailymotion**
|
- **dailymotion**
|
||||||
- **dailymotion:playlist**
|
- **dailymotion:playlist**
|
||||||
- **dailymotion:user**
|
- **dailymotion:user**
|
||||||
- **DailymotionCloud**
|
- **DailymotionCloud**
|
||||||
- **daum.net**
|
- **daum.net**
|
||||||
|
- **daum.net:clip**
|
||||||
|
- **daum.net:playlist**
|
||||||
|
- **daum.net:user**
|
||||||
- **DBTV**
|
- **DBTV**
|
||||||
|
- **DCN**
|
||||||
|
- **dcn:live**
|
||||||
|
- **dcn:season**
|
||||||
|
- **dcn:video**
|
||||||
- **DctpTv**
|
- **DctpTv**
|
||||||
- **DeezerPlaylist**
|
- **DeezerPlaylist**
|
||||||
- **defense.gouv.fr**
|
- **defense.gouv.fr**
|
||||||
|
- **democracynow**
|
||||||
- **DHM**: Filmarchiv - Deutsches Historisches Museum
|
- **DHM**: Filmarchiv - Deutsches Historisches Museum
|
||||||
|
- **DigitallySpeaking**
|
||||||
|
- **Digiteka**
|
||||||
- **Discovery**
|
- **Discovery**
|
||||||
- **divxstage**: DivxStage
|
|
||||||
- **Dotsub**
|
- **Dotsub**
|
||||||
- **DouyuTV**
|
- **DouyuTV**: 斗鱼
|
||||||
|
- **DPlay**
|
||||||
- **dramafever**
|
- **dramafever**
|
||||||
- **dramafever:series**
|
- **dramafever:series**
|
||||||
- **DRBonanza**
|
- **DRBonanza**
|
||||||
- **Dropbox**
|
- **Dropbox**
|
||||||
- **DrTuber**
|
- **DrTuber**
|
||||||
- **DRTV**
|
- **DRTV**
|
||||||
- **Dump**
|
|
||||||
- **Dumpert**
|
- **Dumpert**
|
||||||
- **dvtv**: http://video.aktualne.cz/
|
- **dvtv**: http://video.aktualne.cz/
|
||||||
|
- **dw**
|
||||||
|
- **dw:article**
|
||||||
- **EaglePlatform**
|
- **EaglePlatform**
|
||||||
- **EbaumsWorld**
|
- **EbaumsWorld**
|
||||||
- **EchoMsk**
|
- **EchoMsk**
|
||||||
@@ -146,33 +199,42 @@
|
|||||||
- **Eporner**
|
- **Eporner**
|
||||||
- **EroProfile**
|
- **EroProfile**
|
||||||
- **Escapist**
|
- **Escapist**
|
||||||
- **ESPN** (Currently broken)
|
- **ESPN**
|
||||||
|
- **EsriVideo**
|
||||||
|
- **Europa**
|
||||||
- **EveryonesMixtape**
|
- **EveryonesMixtape**
|
||||||
- **exfm**: ex.fm
|
- **exfm**: ex.fm
|
||||||
- **ExpoTV**
|
- **ExpoTV**
|
||||||
- **ExtremeTube**
|
- **ExtremeTube**
|
||||||
|
- **EyedoTV**
|
||||||
- **facebook**
|
- **facebook**
|
||||||
- **faz.net**
|
- **faz.net**
|
||||||
- **fc2**
|
- **fc2**
|
||||||
|
- **Fczenit**
|
||||||
|
- **features.aol.com**
|
||||||
- **fernsehkritik.tv**
|
- **fernsehkritik.tv**
|
||||||
- **fernsehkritik.tv:postecke**
|
|
||||||
- **Firstpost**
|
- **Firstpost**
|
||||||
- **FiveTV**
|
- **FiveTV**
|
||||||
- **Flickr**
|
- **Flickr**
|
||||||
- **Folketinget**: Folketinget (ft.dk; Danish parliament)
|
- **Folketinget**: Folketinget (ft.dk; Danish parliament)
|
||||||
- **FootyRoom**
|
- **FootyRoom**
|
||||||
|
- **Formula1**
|
||||||
|
- **FOX**
|
||||||
- **Foxgay**
|
- **Foxgay**
|
||||||
- **FoxNews**
|
- **FoxNews**: Fox News and Fox Business Video
|
||||||
- **FoxSports**
|
- **FoxSports**
|
||||||
- **france2.fr:generation-quoi**
|
- **france2.fr:generation-quoi**
|
||||||
- **FranceCulture**
|
- **FranceCulture**
|
||||||
|
- **FranceCultureEmission**
|
||||||
- **FranceInter**
|
- **FranceInter**
|
||||||
- **francetv**: France 2, 3, 4, 5 and Ô
|
- **francetv**: France 2, 3, 4, 5 and Ô
|
||||||
- **francetvinfo.fr**
|
- **francetvinfo.fr**
|
||||||
- **Freesound**
|
- **Freesound**
|
||||||
- **freespeech.org**
|
- **freespeech.org**
|
||||||
- **FreeVideo**
|
- **FreeVideo**
|
||||||
|
- **Funimation**
|
||||||
- **FunnyOrDie**
|
- **FunnyOrDie**
|
||||||
|
- **GameInformer**
|
||||||
- **Gamekings**
|
- **Gamekings**
|
||||||
- **GameOne**
|
- **GameOne**
|
||||||
- **gameone:playlist**
|
- **gameone:playlist**
|
||||||
@@ -188,25 +250,27 @@
|
|||||||
- **Giga**
|
- **Giga**
|
||||||
- **Glide**: Glide mobile video messages (glide.me)
|
- **Glide**: Glide mobile video messages (glide.me)
|
||||||
- **Globo**
|
- **Globo**
|
||||||
|
- **GloboArticle**
|
||||||
- **GodTube**
|
- **GodTube**
|
||||||
- **GoldenMoustache**
|
- **GoldenMoustache**
|
||||||
- **Golem**
|
- **Golem**
|
||||||
- **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net
|
- **GoogleDrive**
|
||||||
- **Goshgay**
|
- **Goshgay**
|
||||||
|
- **GPUTechConf**
|
||||||
- **Groupon**
|
- **Groupon**
|
||||||
- **Hark**
|
- **Hark**
|
||||||
|
- **HBO**
|
||||||
- **HearThisAt**
|
- **HearThisAt**
|
||||||
- **Heise**
|
- **Heise**
|
||||||
- **HellPorno**
|
- **HellPorno**
|
||||||
- **Helsinki**: helsinki.fi
|
- **Helsinki**: helsinki.fi
|
||||||
- **HentaiStigma**
|
- **HentaiStigma**
|
||||||
- **HistoricFilms**
|
- **HistoricFilms**
|
||||||
- **History**
|
|
||||||
- **hitbox**
|
- **hitbox**
|
||||||
- **hitbox:live**
|
- **hitbox:live**
|
||||||
- **HornBunny**
|
- **HornBunny**
|
||||||
- **HostingBulk**
|
|
||||||
- **HotNewHipHop**
|
- **HotNewHipHop**
|
||||||
|
- **HotStar**
|
||||||
- **Howcast**
|
- **Howcast**
|
||||||
- **HowStuffWorks**
|
- **HowStuffWorks**
|
||||||
- **HuffPost**: Huffington Post
|
- **HuffPost**: Huffington Post
|
||||||
@@ -216,21 +280,25 @@
|
|||||||
- **imdb**: Internet Movie Database trailers
|
- **imdb**: Internet Movie Database trailers
|
||||||
- **imdb:list**: Internet Movie Database lists
|
- **imdb:list**: Internet Movie Database lists
|
||||||
- **Imgur**
|
- **Imgur**
|
||||||
|
- **ImgurAlbum**
|
||||||
- **Ina**
|
- **Ina**
|
||||||
|
- **Indavideo**
|
||||||
|
- **IndavideoEmbed**
|
||||||
- **InfoQ**
|
- **InfoQ**
|
||||||
- **Instagram**
|
- **Instagram**
|
||||||
- **instagram:user**: Instagram user profile
|
- **instagram:user**: Instagram user profile
|
||||||
- **InternetVideoArchive**
|
- **InternetVideoArchive**
|
||||||
- **IPrima**
|
- **IPrima**
|
||||||
- **iqiyi**
|
- **iqiyi**: 爱奇艺
|
||||||
|
- **Ir90Tv**
|
||||||
- **ivi**: ivi.ru
|
- **ivi**: ivi.ru
|
||||||
- **ivi:compilation**: ivi.ru compilations
|
- **ivi:compilation**: ivi.ru compilations
|
||||||
|
- **ivideon**: Ivideon TV
|
||||||
- **Izlesene**
|
- **Izlesene**
|
||||||
- **JadoreCettePub**
|
|
||||||
- **JeuxVideo**
|
- **JeuxVideo**
|
||||||
- **Jove**
|
- **Jove**
|
||||||
- **jpopsuki.tv**
|
- **jpopsuki.tv**
|
||||||
- **Jukebox**
|
- **JWPlatform**
|
||||||
- **Kaltura**
|
- **Kaltura**
|
||||||
- **KanalPlay**: Kanal 5/9/11 Play
|
- **KanalPlay**: Kanal 5/9/11 Play
|
||||||
- **Kankan**
|
- **Kankan**
|
||||||
@@ -240,104 +308,150 @@
|
|||||||
- **KeezMovies**
|
- **KeezMovies**
|
||||||
- **KhanAcademy**
|
- **KhanAcademy**
|
||||||
- **KickStarter**
|
- **KickStarter**
|
||||||
|
- **KonserthusetPlay**
|
||||||
- **kontrtube**: KontrTube.ru - Труба зовёт
|
- **kontrtube**: KontrTube.ru - Труба зовёт
|
||||||
- **KrasView**: Красвью
|
- **KrasView**: Красвью
|
||||||
- **Ku6**
|
- **Ku6**
|
||||||
|
- **KUSI**
|
||||||
|
- **kuwo:album**: 酷我音乐 - 专辑
|
||||||
|
- **kuwo:category**: 酷我音乐 - 分类
|
||||||
|
- **kuwo:chart**: 酷我音乐 - 排行榜
|
||||||
|
- **kuwo:mv**: 酷我音乐 - MV
|
||||||
|
- **kuwo:singer**: 酷我音乐 - 歌手
|
||||||
|
- **kuwo:song**: 酷我音乐
|
||||||
- **la7.tv**
|
- **la7.tv**
|
||||||
- **Laola1Tv**
|
- **Laola1Tv**
|
||||||
- **Letv**
|
- **Le**: 乐视网
|
||||||
- **LetvPlaylist**
|
- **Learnr**
|
||||||
- **LetvTv**
|
- **Lecture2Go**
|
||||||
|
- **Lemonde**
|
||||||
|
- **LePlaylist**
|
||||||
|
- **LetvCloud**: 乐视云
|
||||||
- **Libsyn**
|
- **Libsyn**
|
||||||
|
- **life**: Life.ru
|
||||||
- **life:embed**
|
- **life:embed**
|
||||||
- **lifenews**: LIFE | NEWS
|
- **limelight**
|
||||||
|
- **limelight:channel**
|
||||||
|
- **limelight:channel_list**
|
||||||
|
- **LiTV**
|
||||||
- **LiveLeak**
|
- **LiveLeak**
|
||||||
- **livestream**
|
- **livestream**
|
||||||
- **livestream:original**
|
- **livestream:original**
|
||||||
- **LnkGo**
|
- **LnkGo**
|
||||||
|
- **LocalNews8**
|
||||||
|
- **LoveHomePorn**
|
||||||
- **lrt.lt**
|
- **lrt.lt**
|
||||||
- **lynda**: lynda.com videos
|
- **lynda**: lynda.com videos
|
||||||
- **lynda:course**: lynda.com online courses
|
- **lynda:course**: lynda.com online courses
|
||||||
- **m6**
|
- **m6**
|
||||||
- **macgamestore**: MacGameStore trailers
|
- **macgamestore**: MacGameStore trailers
|
||||||
- **mailru**: Видео@Mail.Ru
|
- **mailru**: Видео@Mail.Ru
|
||||||
- **Malemotion**
|
- **MakersChannel**
|
||||||
- **MDR**
|
- **MakerTV**
|
||||||
|
- **MatchTV**
|
||||||
|
- **MDR**: MDR.DE and KiKA
|
||||||
- **media.ccc.de**
|
- **media.ccc.de**
|
||||||
- **MegaVideoz**
|
|
||||||
- **metacafe**
|
- **metacafe**
|
||||||
- **Metacritic**
|
- **Metacritic**
|
||||||
- **Mgoon**
|
- **Mgoon**
|
||||||
|
- **MGTV**: 芒果TV
|
||||||
- **Minhateca**
|
- **Minhateca**
|
||||||
- **MinistryGrid**
|
- **MinistryGrid**
|
||||||
|
- **Minoto**
|
||||||
- **miomio.tv**
|
- **miomio.tv**
|
||||||
- **mitele.es**
|
- **MiTele**: mitele.es
|
||||||
- **mixcloud**
|
- **mixcloud**
|
||||||
|
- **mixcloud:playlist**
|
||||||
|
- **mixcloud:stream**
|
||||||
|
- **mixcloud:user**
|
||||||
- **MLB**
|
- **MLB**
|
||||||
|
- **Mnet**
|
||||||
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
||||||
- **Mofosex**
|
- **Mofosex**
|
||||||
- **Mojvideo**
|
- **Mojvideo**
|
||||||
- **Moniker**: allmyvideos.net and vidspot.net
|
- **Moniker**: allmyvideos.net and vidspot.net
|
||||||
- **mooshare**: Mooshare.biz
|
|
||||||
- **Morningstar**: morningstar.com
|
- **Morningstar**: morningstar.com
|
||||||
- **Motherless**
|
- **Motherless**
|
||||||
- **Motorsport**: motorsport.com
|
- **Motorsport**: motorsport.com
|
||||||
- **MovieClips**
|
- **MovieClips**
|
||||||
|
- **MovieFap**
|
||||||
- **Moviezine**
|
- **Moviezine**
|
||||||
- **movshare**: MovShare
|
|
||||||
- **MPORA**
|
- **MPORA**
|
||||||
|
- **MSNBC**
|
||||||
- **MTV**
|
- **MTV**
|
||||||
|
- **mtv.de**
|
||||||
- **mtviggy.com**
|
- **mtviggy.com**
|
||||||
- **mtvservices:embedded**
|
- **mtvservices:embedded**
|
||||||
- **MuenchenTV**: münchen.tv
|
- **MuenchenTV**: münchen.tv
|
||||||
- **MusicPlayOn**
|
- **MusicPlayOn**
|
||||||
- **MusicVault**
|
- **mva**: Microsoft Virtual Academy videos
|
||||||
- **muzu.tv**
|
- **mva:course**: Microsoft Virtual Academy courses
|
||||||
|
- **Mwave**
|
||||||
|
- **MwaveMeetGreet**
|
||||||
- **MySpace**
|
- **MySpace**
|
||||||
- **MySpace:album**
|
- **MySpace:album**
|
||||||
- **MySpass**
|
- **MySpass**
|
||||||
- **myvideo**
|
- **Myvi**
|
||||||
|
- **myvideo** (Currently broken)
|
||||||
- **MyVidster**
|
- **MyVidster**
|
||||||
- **N-JOY**
|
|
||||||
- **n-tv.de**
|
- **n-tv.de**
|
||||||
- **NationalGeographic**
|
- **natgeo**
|
||||||
|
- **natgeo:channel**
|
||||||
- **Naver**
|
- **Naver**
|
||||||
- **NBA**
|
- **NBA**
|
||||||
- **NBC**
|
- **NBC**
|
||||||
- **NBCNews**
|
- **NBCNews**
|
||||||
- **NBCSports**
|
- **NBCSports**
|
||||||
- **NBCSportsVPlayer**
|
- **NBCSportsVPlayer**
|
||||||
- **ndr**: NDR.de - Mediathek
|
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
||||||
|
- **ndr:embed**
|
||||||
|
- **ndr:embed:base**
|
||||||
- **NDTV**
|
- **NDTV**
|
||||||
- **NerdCubedFeed**
|
- **NerdCubedFeed**
|
||||||
- **Nerdist**
|
- **netease:album**: 网易云音乐 - 专辑
|
||||||
|
- **netease:djradio**: 网易云音乐 - 电台
|
||||||
|
- **netease:mv**: 网易云音乐 - MV
|
||||||
|
- **netease:playlist**: 网易云音乐 - 歌单
|
||||||
|
- **netease:program**: 网易云音乐 - 电台节目
|
||||||
|
- **netease:singer**: 网易云音乐 - 歌手
|
||||||
|
- **netease:song**: 网易云音乐
|
||||||
- **Netzkino**
|
- **Netzkino**
|
||||||
- **Newgrounds**
|
- **Newgrounds**
|
||||||
- **Newstube**
|
- **Newstube**
|
||||||
- **NextMedia**
|
- **NextMedia**: 蘋果日報
|
||||||
- **NextMediaActionNews**
|
- **NextMediaActionNews**: 蘋果日報 - 動新聞
|
||||||
|
- **nextmovie.com**
|
||||||
- **nfb**: National Film Board of Canada
|
- **nfb**: National Film Board of Canada
|
||||||
- **nfl.com**
|
- **nfl.com**
|
||||||
- **nhl.com**
|
- **nhl.com**
|
||||||
- **nhl.com:news**: NHL news
|
- **nhl.com:news**: NHL news
|
||||||
- **nhl.com:videocenter**: NHL videocenter category
|
- **nhl.com:videocenter**
|
||||||
|
- **nhl.com:videocenter:category**: NHL videocenter category
|
||||||
|
- **nick.com**
|
||||||
- **niconico**: ニコニコ動画
|
- **niconico**: ニコニコ動画
|
||||||
- **NiconicoPlaylist**
|
- **NiconicoPlaylist**
|
||||||
|
- **njoy**: N-JOY
|
||||||
|
- **njoy:embed**
|
||||||
- **Noco**
|
- **Noco**
|
||||||
- **Normalboots**
|
- **Normalboots**
|
||||||
- **NosVideo**
|
- **NosVideo**
|
||||||
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
|
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
|
||||||
- **novamov**: NovaMov
|
- **nowness**
|
||||||
- **Nowness**
|
- **nowness:playlist**
|
||||||
- **NowTV**
|
- **nowness:series**
|
||||||
|
- **NowTV** (Currently broken)
|
||||||
|
- **NowTVList**
|
||||||
- **nowvideo**: NowVideo
|
- **nowvideo**: NowVideo
|
||||||
- **npo.nl**
|
- **Noz**
|
||||||
|
- **npo**: npo.nl and ntr.nl
|
||||||
- **npo.nl:live**
|
- **npo.nl:live**
|
||||||
- **npo.nl:radio**
|
- **npo.nl:radio**
|
||||||
- **npo.nl:radio:fragment**
|
- **npo.nl:radio:fragment**
|
||||||
|
- **Npr**
|
||||||
- **NRK**
|
- **NRK**
|
||||||
- **NRKPlaylist**
|
- **NRKPlaylist**
|
||||||
- **NRKTV**
|
- **NRKSkole**: NRK Skole
|
||||||
|
- **NRKTV**: NRK TV and NRK Radio
|
||||||
- **ntv.ru**
|
- **ntv.ru**
|
||||||
- **Nuvid**
|
- **Nuvid**
|
||||||
- **NYTimes**
|
- **NYTimes**
|
||||||
@@ -349,58 +463,78 @@
|
|||||||
- **OnionStudios**
|
- **OnionStudios**
|
||||||
- **Ooyala**
|
- **Ooyala**
|
||||||
- **OoyalaExternal**
|
- **OoyalaExternal**
|
||||||
- **OpenFilm**
|
- **Openload**
|
||||||
|
- **OraTV**
|
||||||
- **orf:fm4**: radio FM4
|
- **orf:fm4**: radio FM4
|
||||||
- **orf:iptv**: iptv.ORF.at
|
- **orf:iptv**: iptv.ORF.at
|
||||||
- **orf:oe1**: Radio Österreich 1
|
- **orf:oe1**: Radio Österreich 1
|
||||||
- **orf:tvthek**: ORF TVthek
|
- **orf:tvthek**: ORF TVthek
|
||||||
|
- **pandora.tv**: 판도라TV
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
- **Patreon**
|
- **Patreon**
|
||||||
- **PBS**
|
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! 
(WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
||||||
|
- **pcmag**
|
||||||
|
- **People**
|
||||||
|
- **periscope**: Periscope
|
||||||
|
- **periscope:user**: Periscope user videos
|
||||||
- **PhilharmonieDeParis**: Philharmonie de Paris
|
- **PhilharmonieDeParis**: Philharmonie de Paris
|
||||||
- **Phoenix**
|
- **phoenix.de**
|
||||||
- **Photobucket**
|
- **Photobucket**
|
||||||
- **Pinkbike**
|
- **Pinkbike**
|
||||||
- **Pladform**
|
- **Pladform**
|
||||||
- **PlanetaPlay**
|
|
||||||
- **play.fm**
|
- **play.fm**
|
||||||
- **played.to**
|
- **played.to**
|
||||||
|
- **PlaysTV**
|
||||||
|
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
|
||||||
- **Playvid**
|
- **Playvid**
|
||||||
- **Playwire**
|
- **Playwire**
|
||||||
|
- **pluralsight**
|
||||||
|
- **pluralsight:course**
|
||||||
- **plus.google**: Google Plus
|
- **plus.google**: Google Plus
|
||||||
- **pluzz.francetv.fr**
|
- **pluzz.francetv.fr**
|
||||||
- **podomatic**
|
- **podomatic**
|
||||||
- **PornHd**
|
- **PornHd**
|
||||||
- **PornHub**
|
- **PornHub**
|
||||||
- **PornHubPlaylist**
|
- **PornHubPlaylist**
|
||||||
|
- **PornHubUserVideos**
|
||||||
- **Pornotube**
|
- **Pornotube**
|
||||||
- **PornoVoisines**
|
- **PornoVoisines**
|
||||||
- **PornoXO**
|
- **PornoXO**
|
||||||
|
- **PressTV**
|
||||||
- **PrimeShareTV**
|
- **PrimeShareTV**
|
||||||
- **PromptFile**
|
- **PromptFile**
|
||||||
- **prosiebensat1**: ProSiebenSat.1 Digital
|
- **prosiebensat1**: ProSiebenSat.1 Digital
|
||||||
- **Puls4**
|
- **Puls4**
|
||||||
- **Pyvideo**
|
- **Pyvideo**
|
||||||
- **qqmusic**
|
- **qqmusic**: QQ音乐
|
||||||
- **qqmusic:album**
|
- **qqmusic:album**: QQ音乐 - 专辑
|
||||||
- **qqmusic:singer**
|
- **qqmusic:playlist**: QQ音乐 - 歌单
|
||||||
- **qqmusic:toplist**
|
- **qqmusic:singer**: QQ音乐 - 歌手
|
||||||
- **QuickVid**
|
- **qqmusic:toplist**: QQ音乐 - 排行榜
|
||||||
- **R7**
|
- **R7**
|
||||||
- **radio.de**
|
- **radio.de**
|
||||||
- **radiobremen**
|
- **radiobremen**
|
||||||
|
- **radiocanada**
|
||||||
|
- **RadioCanadaAudioVideo**
|
||||||
- **radiofrance**
|
- **radiofrance**
|
||||||
- **RadioJavan**
|
- **RadioJavan**
|
||||||
- **Rai**
|
- **Rai**
|
||||||
|
- **RaiTV**
|
||||||
- **RBMARadio**
|
- **RBMARadio**
|
||||||
|
- **RDS**: RDS.ca
|
||||||
- **RedTube**
|
- **RedTube**
|
||||||
|
- **RegioTV**
|
||||||
- **Restudy**
|
- **Restudy**
|
||||||
|
- **Reuters**
|
||||||
- **ReverbNation**
|
- **ReverbNation**
|
||||||
|
- **Revision3**
|
||||||
|
- **RICE**
|
||||||
- **RingTV**
|
- **RingTV**
|
||||||
- **RottenTomatoes**
|
- **RottenTomatoes**
|
||||||
- **Roxwel**
|
- **Roxwel**
|
||||||
- **RTBF**
|
- **RTBF**
|
||||||
- **Rte**
|
- **rte**: Raidió Teilifís Éireann TV
|
||||||
|
- **rte:radio**: Raidió Teilifís Éireann radio
|
||||||
- **rtl.nl**: rtl.nl and rtlxl.nl
|
- **rtl.nl**: rtl.nl and rtlxl.nl
|
||||||
- **RTL2**
|
- **RTL2**
|
||||||
- **RTP**
|
- **RTP**
|
||||||
@@ -408,7 +542,9 @@
|
|||||||
- **rtve.es:alacarta**: RTVE a la carta
|
- **rtve.es:alacarta**: RTVE a la carta
|
||||||
- **rtve.es:infantil**: RTVE infantil
|
- **rtve.es:infantil**: RTVE infantil
|
||||||
- **rtve.es:live**: RTVE.es live streams
|
- **rtve.es:live**: RTVE.es live streams
|
||||||
|
- **RTVNH**
|
||||||
- **RUHD**
|
- **RUHD**
|
||||||
|
- **RulePorn**
|
||||||
- **rutube**: Rutube videos
|
- **rutube**: Rutube videos
|
||||||
- **rutube:channel**: Rutube channels
|
- **rutube:channel**: Rutube channels
|
||||||
- **rutube:embed**: Rutube embedded videos
|
- **rutube:embed**: Rutube embedded videos
|
||||||
@@ -417,23 +553,29 @@
|
|||||||
- **RUTV**: RUTV.RU
|
- **RUTV**: RUTV.RU
|
||||||
- **Ruutu**
|
- **Ruutu**
|
||||||
- **safari**: safaribooksonline.com online video
|
- **safari**: safaribooksonline.com online video
|
||||||
|
- **safari:api**
|
||||||
- **safari:course**: safaribooksonline.com online courses
|
- **safari:course**: safaribooksonline.com online courses
|
||||||
- **Sandia**: Sandia National Laboratories
|
- **Sandia**: Sandia National Laboratories
|
||||||
- **Sapo**: SAPO Vídeos
|
- **Sapo**: SAPO Vídeos
|
||||||
- **savefrom.net**
|
- **savefrom.net**
|
||||||
- **SBS**: sbs.com.au
|
- **SBS**: sbs.com.au
|
||||||
|
- **schooltv**
|
||||||
- **SciVee**
|
- **SciVee**
|
||||||
- **screen.yahoo:search**: Yahoo screen search
|
- **screen.yahoo:search**: Yahoo screen search
|
||||||
- **Screencast**
|
- **Screencast**
|
||||||
- **ScreencastOMatic**
|
- **ScreencastOMatic**
|
||||||
|
- **ScreenJunkies**
|
||||||
- **ScreenwaveMedia**
|
- **ScreenwaveMedia**
|
||||||
- **SenateISVP**
|
- **SenateISVP**
|
||||||
|
- **SendtoNews**
|
||||||
- **ServingSys**
|
- **ServingSys**
|
||||||
- **Sexu**
|
- **Sexu**
|
||||||
- **SexyKarma**: Sexy Karma and Watch Indian Porn
|
- **Shahid**
|
||||||
- **Shared**
|
- **Shared**: shared.sx and vivo.sx
|
||||||
- **ShareSix**
|
- **ShareSix**
|
||||||
- **Sina**
|
- **Sina**
|
||||||
|
- **skynewsarabia:video**
|
||||||
|
- **skynewsarabia:video**
|
||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
- **Slutload**
|
- **Slutload**
|
||||||
- **smotri**: Smotri.com
|
- **smotri**: Smotri.com
|
||||||
@@ -442,10 +584,9 @@
|
|||||||
- **smotri:user**: Smotri.com user videos
|
- **smotri:user**: Smotri.com user videos
|
||||||
- **Snotr**
|
- **Snotr**
|
||||||
- **Sohu**
|
- **Sohu**
|
||||||
- **soompi**
|
|
||||||
- **soompi:show**
|
|
||||||
- **soundcloud**
|
- **soundcloud**
|
||||||
- **soundcloud:playlist**
|
- **soundcloud:playlist**
|
||||||
|
- **soundcloud:search**: Soundcloud search
|
||||||
- **soundcloud:set**
|
- **soundcloud:set**
|
||||||
- **soundcloud:user**
|
- **soundcloud:user**
|
||||||
- **soundgasm**
|
- **soundgasm**
|
||||||
@@ -455,7 +596,6 @@
|
|||||||
- **southpark.de**
|
- **southpark.de**
|
||||||
- **southpark.nl**
|
- **southpark.nl**
|
||||||
- **southparkstudios.dk**
|
- **southparkstudios.dk**
|
||||||
- **Space**
|
|
||||||
- **SpankBang**
|
- **SpankBang**
|
||||||
- **Spankwire**
|
- **Spankwire**
|
||||||
- **Spiegel**
|
- **Spiegel**
|
||||||
@@ -466,11 +606,13 @@
|
|||||||
- **SportBox**
|
- **SportBox**
|
||||||
- **SportBoxEmbed**
|
- **SportBoxEmbed**
|
||||||
- **SportDeutschland**
|
- **SportDeutschland**
|
||||||
- **Srf**
|
- **Sportschau**
|
||||||
- **SRMediathek**: Saarländischer Rundfunk
|
- **SRGSSR**
|
||||||
|
- **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
|
||||||
- **SSA**
|
- **SSA**
|
||||||
- **stanfordoc**: Stanford Open ClassRoom
|
- **stanfordoc**: Stanford Open ClassRoom
|
||||||
- **Steam**
|
- **Steam**
|
||||||
|
- **Stitcher**
|
||||||
- **streamcloud.eu**
|
- **streamcloud.eu**
|
||||||
- **StreamCZ**
|
- **StreamCZ**
|
||||||
- **StreetVoice**
|
- **StreetVoice**
|
||||||
@@ -481,8 +623,10 @@
|
|||||||
- **Syfy**
|
- **Syfy**
|
||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
- **Tagesschau**
|
- **Tagesschau**
|
||||||
|
- **tagesschau:player**
|
||||||
- **Tapely**
|
- **Tapely**
|
||||||
- **Tass**
|
- **Tass**
|
||||||
|
- **TDSLifeway**
|
||||||
- **teachertube**: teachertube.com videos
|
- **teachertube**: teachertube.com videos
|
||||||
- **teachertube:user:collection**: teachertube.com user and collection videos
|
- **teachertube:user:collection**: teachertube.com user and collection videos
|
||||||
- **TeachingChannel**
|
- **TeachingChannel**
|
||||||
@@ -491,66 +635,81 @@
|
|||||||
- **TechTalks**
|
- **TechTalks**
|
||||||
- **techtv.mit.edu**
|
- **techtv.mit.edu**
|
||||||
- **ted**
|
- **ted**
|
||||||
- **tegenlicht.vpro.nl**
|
- **Tele13**
|
||||||
- **TeleBruxelles**
|
- **TeleBruxelles**
|
||||||
- **telecinco.es**
|
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
||||||
|
- **Telegraaf**
|
||||||
- **TeleMB**
|
- **TeleMB**
|
||||||
- **TeleTask**
|
- **TeleTask**
|
||||||
- **TenPlay**
|
|
||||||
- **TestTube**
|
|
||||||
- **TF1**
|
- **TF1**
|
||||||
- **TheOnion**
|
- **TheIntercept**
|
||||||
- **ThePlatform**
|
- **ThePlatform**
|
||||||
|
- **ThePlatformFeed**
|
||||||
|
- **TheScene**
|
||||||
- **TheSixtyOne**
|
- **TheSixtyOne**
|
||||||
|
- **TheStar**
|
||||||
|
- **ThisAmericanLife**
|
||||||
- **ThisAV**
|
- **ThisAV**
|
||||||
- **THVideo**
|
- **THVideo**
|
||||||
- **THVideoPlaylist**
|
- **THVideoPlaylist**
|
||||||
- **tinypic**: tinypic.com videos
|
- **tinypic**: tinypic.com videos
|
||||||
- **tlc.com**
|
|
||||||
- **tlc.de**
|
- **tlc.de**
|
||||||
- **TMZ**
|
- **TMZ**
|
||||||
- **TMZArticle**
|
- **TMZArticle**
|
||||||
- **TNAFlix**
|
- **TNAFlix**
|
||||||
|
- **TNAFlixNetworkEmbed**
|
||||||
|
- **toggle**
|
||||||
- **tou.tv**
|
- **tou.tv**
|
||||||
- **Toypics**: Toypics user profile
|
- **Toypics**: Toypics user profile
|
||||||
- **ToypicsUser**: Toypics user profile
|
- **ToypicsUser**: Toypics user profile
|
||||||
- **TrailerAddict** (Currently broken)
|
- **TrailerAddict** (Currently broken)
|
||||||
- **Trilulilu**
|
- **Trilulilu**
|
||||||
|
- **trollvids**
|
||||||
- **TruTube**
|
- **TruTube**
|
||||||
- **Tube8**
|
- **Tube8**
|
||||||
- **TubiTv**
|
- **TubiTv**
|
||||||
- **Tudou**
|
- **tudou**
|
||||||
|
- **tudou:album**
|
||||||
|
- **tudou:playlist**
|
||||||
- **Tumblr**
|
- **Tumblr**
|
||||||
- **TuneIn**
|
- **tunein:clip**
|
||||||
|
- **tunein:program**
|
||||||
|
- **tunein:station**
|
||||||
|
- **tunein:topic**
|
||||||
- **Turbo**
|
- **Turbo**
|
||||||
- **Tutv**
|
- **Tutv**
|
||||||
- **tv.dfb.de**
|
- **tv.dfb.de**
|
||||||
- **TV2**
|
- **TV2**
|
||||||
- **TV2Article**
|
- **TV2Article**
|
||||||
|
- **TV3**
|
||||||
- **TV4**: tv4.se and tv4play.se
|
- **TV4**: tv4.se and tv4play.se
|
||||||
- **TVC**
|
- **TVC**
|
||||||
- **TVCArticle**
|
- **TVCArticle**
|
||||||
- **tvigle**: Интернет-телевидение Tvigle.ru
|
- **tvigle**: Интернет-телевидение Tvigle.ru
|
||||||
- **tvp.pl**
|
- **tvland.com**
|
||||||
- **tvp.pl:Series**
|
- **tvp**: Telewizja Polska
|
||||||
|
- **tvp:series**
|
||||||
- **TVPlay**: TV3Play and related services
|
- **TVPlay**: TV3Play and related services
|
||||||
- **Tweakers**
|
- **Tweakers**
|
||||||
- **twitch:bookmarks**
|
|
||||||
- **twitch:chapter**
|
- **twitch:chapter**
|
||||||
- **twitch:past_broadcasts**
|
- **twitch:past_broadcasts**
|
||||||
- **twitch:profile**
|
- **twitch:profile**
|
||||||
- **twitch:stream**
|
- **twitch:stream**
|
||||||
- **twitch:video**
|
- **twitch:video**
|
||||||
- **twitch:vod**
|
- **twitch:vod**
|
||||||
- **Ubu**
|
- **twitter**
|
||||||
|
- **twitter:amplify**
|
||||||
|
- **twitter:card**
|
||||||
- **udemy**
|
- **udemy**
|
||||||
- **udemy:course**
|
- **udemy:course**
|
||||||
- **UDNEmbed**
|
- **UDNEmbed**: 聯合影音
|
||||||
- **Ultimedia**
|
|
||||||
- **Unistra**
|
- **Unistra**
|
||||||
- **Urort**: NRK P3 Urørt
|
- **Urort**: NRK P3 Urørt
|
||||||
|
- **USAToday**
|
||||||
- **ustream**
|
- **ustream**
|
||||||
- **ustream:channel**
|
- **ustream:channel**
|
||||||
|
- **ustudio**
|
||||||
|
- **ustudio:embed**
|
||||||
- **Varzesh3**
|
- **Varzesh3**
|
||||||
- **Vbox7**
|
- **Vbox7**
|
||||||
- **VeeHD**
|
- **VeeHD**
|
||||||
@@ -558,25 +717,33 @@
|
|||||||
- **Vessel**
|
- **Vessel**
|
||||||
- **Vesti**: Вести.Ru
|
- **Vesti**: Вести.Ru
|
||||||
- **Vevo**
|
- **Vevo**
|
||||||
- **VGTV**: VGTV and BTTV
|
- **VevoPlaylist**
|
||||||
|
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
||||||
- **vh1.com**
|
- **vh1.com**
|
||||||
- **Vice**
|
- **Vice**
|
||||||
|
- **ViceShow**
|
||||||
- **Viddler**
|
- **Viddler**
|
||||||
- **video.google:search**: Google Video search
|
- **video.google:search**: Google Video search
|
||||||
- **video.mit.edu**
|
- **video.mit.edu**
|
||||||
- **VideoBam**
|
|
||||||
- **VideoDetective**
|
- **VideoDetective**
|
||||||
- **videofy.me**
|
- **videofy.me**
|
||||||
- **videolectures.net**
|
|
||||||
- **VideoMega**
|
- **VideoMega**
|
||||||
|
- **videomore**
|
||||||
|
- **videomore:season**
|
||||||
|
- **videomore:video**
|
||||||
- **VideoPremium**
|
- **VideoPremium**
|
||||||
- **VideoTt**: video.tt - Your True Tube
|
- **VideoTt**: video.tt - Your True Tube (Currently broken)
|
||||||
- **videoweed**: VideoWeed
|
- **videoweed**: VideoWeed
|
||||||
- **Vidme**
|
- **vidme**
|
||||||
|
- **vidme:user**
|
||||||
|
- **vidme:user:likes**
|
||||||
- **Vidzi**
|
- **Vidzi**
|
||||||
- **vier**
|
- **vier**
|
||||||
- **vier:videos**
|
- **vier:videos**
|
||||||
|
- **ViewLift**
|
||||||
|
- **ViewLiftEmbed**
|
||||||
- **Viewster**
|
- **Viewster**
|
||||||
|
- **Viidea**
|
||||||
- **viki**
|
- **viki**
|
||||||
- **viki:channel**
|
- **viki:channel**
|
||||||
- **vimeo**
|
- **vimeo**
|
||||||
@@ -584,63 +751,80 @@
|
|||||||
- **vimeo:channel**
|
- **vimeo:channel**
|
||||||
- **vimeo:group**
|
- **vimeo:group**
|
||||||
- **vimeo:likes**: Vimeo user likes
|
- **vimeo:likes**: Vimeo user likes
|
||||||
|
- **vimeo:ondemand**
|
||||||
- **vimeo:review**: Review pages on vimeo
|
- **vimeo:review**: Review pages on vimeo
|
||||||
- **vimeo:user**
|
- **vimeo:user**
|
||||||
- **vimeo:watchlater**: Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)
|
- **vimeo:watchlater**: Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)
|
||||||
- **Vimple**: Vimple - one-click video hosting
|
- **Vimple**: Vimple - one-click video hosting
|
||||||
- **Vine**
|
- **Vine**
|
||||||
- **vine:user**
|
- **vine:user**
|
||||||
- **vk.com**
|
- **vk**: VK
|
||||||
- **vk.com:user-videos**: vk.com:All of a user's videos
|
- **vk:uservideos**: VK - User's Videos
|
||||||
|
- **vlive**
|
||||||
- **Vodlocker**
|
- **Vodlocker**
|
||||||
- **VoiceRepublic**
|
- **VoiceRepublic**
|
||||||
|
- **VoxMedia**
|
||||||
- **Vporn**
|
- **Vporn**
|
||||||
|
- **vpro**: npo.nl and ntr.nl
|
||||||
- **VRT**
|
- **VRT**
|
||||||
- **vube**: Vube.com
|
- **vube**: Vube.com
|
||||||
- **VuClip**
|
- **VuClip**
|
||||||
- **vulture.com**
|
- **vulture.com**
|
||||||
- **Walla**
|
- **Walla**
|
||||||
- **WashingtonPost**
|
- **washingtonpost**
|
||||||
|
- **washingtonpost:article**
|
||||||
- **wat.tv**
|
- **wat.tv**
|
||||||
- **WayOfTheMaster**
|
- **WatchIndianPorn**: Watch Indian Porn
|
||||||
- **WDR**
|
- **WDR**
|
||||||
- **wdr:mobile**
|
- **wdr:mobile**
|
||||||
- **WDRMaus**: Sendung mit der Maus
|
- **WDRMaus**: Sendung mit der Maus
|
||||||
- **WebOfStories**
|
- **WebOfStories**
|
||||||
|
- **WebOfStoriesPlaylist**
|
||||||
- **Weibo**
|
- **Weibo**
|
||||||
|
- **WeiqiTV**: WQTV
|
||||||
|
- **wholecloud**: WholeCloud
|
||||||
- **Wimp**
|
- **Wimp**
|
||||||
- **Wistia**
|
- **Wistia**
|
||||||
|
- **WNL**
|
||||||
- **WorldStarHipHop**
|
- **WorldStarHipHop**
|
||||||
- **wrzuta.pl**
|
- **wrzuta.pl**
|
||||||
- **WSJ**: Wall Street Journal
|
- **WSJ**: Wall Street Journal
|
||||||
- **XBef**
|
- **XBef**
|
||||||
- **XboxClips**
|
- **XboxClips**
|
||||||
|
- **XFileShare**: XFileShare based sites: DaClips, FileHoot, GorillaVid, MovPod, PowerWatch, Rapidvideo.ws, TheVideoBee, Vidto, Streamin.To
|
||||||
- **XHamster**
|
- **XHamster**
|
||||||
- **XHamsterEmbed**
|
- **XHamsterEmbed**
|
||||||
|
- **xiami:album**: 虾米音乐 - 专辑
|
||||||
|
- **xiami:artist**: 虾米音乐 - 歌手
|
||||||
|
- **xiami:collection**: 虾米音乐 - 精选集
|
||||||
|
- **xiami:song**: 虾米音乐
|
||||||
- **XMinus**
|
- **XMinus**
|
||||||
- **XNXX**
|
- **XNXX**
|
||||||
- **Xstream**
|
- **Xstream**
|
||||||
- **XTube**
|
- **XTube**
|
||||||
- **XTubeUser**: XTube user profile
|
- **XTubeUser**: XTube user profile
|
||||||
- **Xuite**
|
- **Xuite**: 隨意窩Xuite影音
|
||||||
- **XVideos**
|
- **XVideos**
|
||||||
- **XXXYMovies**
|
- **XXXYMovies**
|
||||||
- **Yahoo**: Yahoo screen and movies
|
- **Yahoo**: Yahoo screen and movies
|
||||||
- **Yam**
|
- **Yam**: 蕃薯藤yam天空部落
|
||||||
- **yandexmusic:album**: Яндекс.Музыка - Альбом
|
- **yandexmusic:album**: Яндекс.Музыка - Альбом
|
||||||
- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
|
- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
|
||||||
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
||||||
- **YesJapan**
|
- **YesJapan**
|
||||||
|
- **yinyuetai:video**: 音悦Tai
|
||||||
- **Ynet**
|
- **Ynet**
|
||||||
- **YouJizz**
|
- **YouJizz**
|
||||||
- **youku**
|
- **youku**: 优酷
|
||||||
- **YouPorn**
|
- **YouPorn**
|
||||||
- **YourUpload**
|
- **YourUpload**
|
||||||
- **youtube**: YouTube.com
|
- **youtube**: YouTube.com
|
||||||
- **youtube:channel**: YouTube.com channels
|
- **youtube:channel**: YouTube.com channels
|
||||||
- **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
|
- **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
|
||||||
- **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
|
- **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
|
||||||
|
- **youtube:live**: YouTube.com live streams
|
||||||
- **youtube:playlist**: YouTube.com playlists
|
- **youtube:playlist**: YouTube.com playlists
|
||||||
|
- **youtube:playlists**: YouTube.com user/channel playlists
|
||||||
- **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
|
- **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
|
||||||
- **youtube:search**: YouTube.com searches
|
- **youtube:search**: YouTube.com searches
|
||||||
- **youtube:search:date**: YouTube.com searches, newest videos first
|
- **youtube:search:date**: YouTube.com searches, newest videos first
|
||||||
@@ -654,3 +838,4 @@
|
|||||||
- **ZDFChannel**
|
- **ZDFChannel**
|
||||||
- **zingmp3:album**: mp3.zing.vn albums
|
- **zingmp3:album**: mp3.zing.vn albums
|
||||||
- **zingmp3:song**: mp3.zing.vn songs
|
- **zingmp3:song**: mp3.zing.vn songs
|
||||||
|
- **ZippCast**
|
||||||
|
|||||||
@@ -2,5 +2,5 @@
|
|||||||
universal = True
|
universal = True
|
||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,setup.py,build,.git
|
exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git
|
||||||
ignore = E402,E501,E731
|
ignore = E402,E501,E731
|
||||||
|
|||||||
24
setup.py
24
setup.py
@@ -8,11 +8,12 @@ import warnings
|
|||||||
import sys
|
import sys
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from setuptools import setup
|
from setuptools import setup, Command
|
||||||
setuptools_available = True
|
setuptools_available = True
|
||||||
except ImportError:
|
except ImportError:
|
||||||
from distutils.core import setup
|
from distutils.core import setup, Command
|
||||||
setuptools_available = False
|
setuptools_available = False
|
||||||
|
from distutils.spawn import spawn
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# This will create an exe that needs Microsoft Visual C++ 2008
|
# This will create an exe that needs Microsoft Visual C++ 2008
|
||||||
@@ -28,7 +29,7 @@ py2exe_options = {
|
|||||||
"compressed": 1,
|
"compressed": 1,
|
||||||
"optimize": 2,
|
"optimize": 2,
|
||||||
"dist_dir": '.',
|
"dist_dir": '.',
|
||||||
"dll_excludes": ['w9xpopen.exe'],
|
"dll_excludes": ['w9xpopen.exe', 'crypt32.dll'],
|
||||||
}
|
}
|
||||||
|
|
||||||
py2exe_console = [{
|
py2exe_console = [{
|
||||||
@@ -70,6 +71,22 @@ else:
|
|||||||
else:
|
else:
|
||||||
params['scripts'] = ['bin/youtube-dl']
|
params['scripts'] = ['bin/youtube-dl']
|
||||||
|
|
||||||
|
class build_lazy_extractors(Command):
|
||||||
|
description = "Build the extractor lazy loading module"
|
||||||
|
user_options = []
|
||||||
|
|
||||||
|
def initialize_options(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def finalize_options(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
spawn(
|
||||||
|
[sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'],
|
||||||
|
dry_run=self.dry_run,
|
||||||
|
)
|
||||||
|
|
||||||
# Get the version from youtube_dl/version.py without importing the package
|
# Get the version from youtube_dl/version.py without importing the package
|
||||||
exec(compile(open('youtube_dl/version.py').read(),
|
exec(compile(open('youtube_dl/version.py').read(),
|
||||||
'youtube_dl/version.py', 'exec'))
|
'youtube_dl/version.py', 'exec'))
|
||||||
@@ -107,5 +124,6 @@ setup(
|
|||||||
"Programming Language :: Python :: 3.4",
|
"Programming Language :: Python :: 3.4",
|
||||||
],
|
],
|
||||||
|
|
||||||
|
cmdclass={'build_lazy_extractors': build_lazy_extractors},
|
||||||
**params
|
**params
|
||||||
)
|
)
|
||||||
|
|||||||
146
test/helper.py
146
test/helper.py
@@ -11,8 +11,11 @@ import sys
|
|||||||
|
|
||||||
import youtube_dl.extractor
|
import youtube_dl.extractor
|
||||||
from youtube_dl import YoutubeDL
|
from youtube_dl import YoutubeDL
|
||||||
from youtube_dl.utils import (
|
from youtube_dl.compat import (
|
||||||
|
compat_os_name,
|
||||||
compat_str,
|
compat_str,
|
||||||
|
)
|
||||||
|
from youtube_dl.utils import (
|
||||||
preferredencoding,
|
preferredencoding,
|
||||||
write_string,
|
write_string,
|
||||||
)
|
)
|
||||||
@@ -21,8 +24,13 @@ from youtube_dl.utils import (
|
|||||||
def get_params(override=None):
|
def get_params(override=None):
|
||||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||||
"parameters.json")
|
"parameters.json")
|
||||||
|
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||||
|
"local_parameters.json")
|
||||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||||
parameters = json.load(pf)
|
parameters = json.load(pf)
|
||||||
|
if os.path.exists(LOCAL_PARAMETERS_FILE):
|
||||||
|
with io.open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||||
|
parameters.update(json.load(pf))
|
||||||
if override:
|
if override:
|
||||||
parameters.update(override)
|
parameters.update(override)
|
||||||
return parameters
|
return parameters
|
||||||
@@ -42,7 +50,7 @@ def report_warning(message):
|
|||||||
Print the message to stderr, it will be prefixed with 'WARNING:'
|
Print the message to stderr, it will be prefixed with 'WARNING:'
|
||||||
If stderr is a tty file the 'WARNING:' will be colored
|
If stderr is a tty file the 'WARNING:' will be colored
|
||||||
'''
|
'''
|
||||||
if sys.stderr.isatty() and os.name != 'nt':
|
if sys.stderr.isatty() and compat_os_name != 'nt':
|
||||||
_msg_header = '\033[0;33mWARNING:\033[0m'
|
_msg_header = '\033[0;33mWARNING:\033[0m'
|
||||||
else:
|
else:
|
||||||
_msg_header = 'WARNING:'
|
_msg_header = 'WARNING:'
|
||||||
@@ -89,66 +97,84 @@ def gettestcases(include_onlymatching=False):
|
|||||||
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
|
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
def expect_info_dict(self, got_dict, expected_dict):
|
def expect_value(self, got, expected, field):
|
||||||
|
if isinstance(expected, compat_str) and expected.startswith('re:'):
|
||||||
|
match_str = expected[len('re:'):]
|
||||||
|
match_rex = re.compile(match_str)
|
||||||
|
|
||||||
|
self.assertTrue(
|
||||||
|
isinstance(got, compat_str),
|
||||||
|
'Expected a %s object, but got %s for field %s' % (
|
||||||
|
compat_str.__name__, type(got).__name__, field))
|
||||||
|
self.assertTrue(
|
||||||
|
match_rex.match(got),
|
||||||
|
'field %s (value: %r) should match %r' % (field, got, match_str))
|
||||||
|
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
|
||||||
|
start_str = expected[len('startswith:'):]
|
||||||
|
self.assertTrue(
|
||||||
|
isinstance(got, compat_str),
|
||||||
|
'Expected a %s object, but got %s for field %s' % (
|
||||||
|
compat_str.__name__, type(got).__name__, field))
|
||||||
|
self.assertTrue(
|
||||||
|
got.startswith(start_str),
|
||||||
|
'field %s (value: %r) should start with %r' % (field, got, start_str))
|
||||||
|
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
|
||||||
|
contains_str = expected[len('contains:'):]
|
||||||
|
self.assertTrue(
|
||||||
|
isinstance(got, compat_str),
|
||||||
|
'Expected a %s object, but got %s for field %s' % (
|
||||||
|
compat_str.__name__, type(got).__name__, field))
|
||||||
|
self.assertTrue(
|
||||||
|
contains_str in got,
|
||||||
|
'field %s (value: %r) should contain %r' % (field, got, contains_str))
|
||||||
|
elif isinstance(expected, type):
|
||||||
|
self.assertTrue(
|
||||||
|
isinstance(got, expected),
|
||||||
|
'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
|
||||||
|
elif isinstance(expected, dict) and isinstance(got, dict):
|
||||||
|
expect_dict(self, got, expected)
|
||||||
|
elif isinstance(expected, list) and isinstance(got, list):
|
||||||
|
self.assertEqual(
|
||||||
|
len(expected), len(got),
|
||||||
|
'Expect a list of length %d, but got a list of length %d for field %s' % (
|
||||||
|
len(expected), len(got), field))
|
||||||
|
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
|
||||||
|
type_got = type(item_got)
|
||||||
|
type_expected = type(item_expected)
|
||||||
|
self.assertEqual(
|
||||||
|
type_expected, type_got,
|
||||||
|
'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
|
||||||
|
index, field, type_expected, type_got))
|
||||||
|
expect_value(self, item_got, item_expected, field)
|
||||||
|
else:
|
||||||
|
if isinstance(expected, compat_str) and expected.startswith('md5:'):
|
||||||
|
self.assertTrue(
|
||||||
|
isinstance(got, compat_str),
|
||||||
|
'Expected field %s to be a unicode object, but got value %r of type %r' % (field, got, type(got)))
|
||||||
|
got = 'md5:' + md5(got)
|
||||||
|
elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
|
||||||
|
self.assertTrue(
|
||||||
|
isinstance(got, (list, dict)),
|
||||||
|
'Expected field %s to be a list or a dict, but it is of type %s' % (
|
||||||
|
field, type(got).__name__))
|
||||||
|
expected_num = int(expected.partition(':')[2])
|
||||||
|
assertGreaterEqual(
|
||||||
|
self, len(got), expected_num,
|
||||||
|
'Expected %d items in field %s, but only got %d' % (expected_num, field, len(got)))
|
||||||
|
return
|
||||||
|
self.assertEqual(
|
||||||
|
expected, got,
|
||||||
|
'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
|
||||||
|
|
||||||
|
|
||||||
|
def expect_dict(self, got_dict, expected_dict):
|
||||||
for info_field, expected in expected_dict.items():
|
for info_field, expected in expected_dict.items():
|
||||||
if isinstance(expected, compat_str) and expected.startswith('re:'):
|
got = got_dict.get(info_field)
|
||||||
got = got_dict.get(info_field)
|
expect_value(self, got, expected, info_field)
|
||||||
match_str = expected[len('re:'):]
|
|
||||||
match_rex = re.compile(match_str)
|
|
||||||
|
|
||||||
self.assertTrue(
|
|
||||||
isinstance(got, compat_str),
|
|
||||||
'Expected a %s object, but got %s for field %s' % (
|
|
||||||
compat_str.__name__, type(got).__name__, info_field))
|
|
||||||
self.assertTrue(
|
|
||||||
match_rex.match(got),
|
|
||||||
'field %s (value: %r) should match %r' % (info_field, got, match_str))
|
|
||||||
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
|
|
||||||
got = got_dict.get(info_field)
|
|
||||||
start_str = expected[len('startswith:'):]
|
|
||||||
self.assertTrue(
|
|
||||||
isinstance(got, compat_str),
|
|
||||||
'Expected a %s object, but got %s for field %s' % (
|
|
||||||
compat_str.__name__, type(got).__name__, info_field))
|
|
||||||
self.assertTrue(
|
|
||||||
got.startswith(start_str),
|
|
||||||
'field %s (value: %r) should start with %r' % (info_field, got, start_str))
|
|
||||||
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
|
|
||||||
got = got_dict.get(info_field)
|
|
||||||
contains_str = expected[len('contains:'):]
|
|
||||||
self.assertTrue(
|
|
||||||
isinstance(got, compat_str),
|
|
||||||
'Expected a %s object, but got %s for field %s' % (
|
|
||||||
compat_str.__name__, type(got).__name__, info_field))
|
|
||||||
self.assertTrue(
|
|
||||||
contains_str in got,
|
|
||||||
'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
|
|
||||||
elif isinstance(expected, type):
|
|
||||||
got = got_dict.get(info_field)
|
|
||||||
self.assertTrue(isinstance(got, expected),
|
|
||||||
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
|
|
||||||
else:
|
|
||||||
if isinstance(expected, compat_str) and expected.startswith('md5:'):
|
|
||||||
got = 'md5:' + md5(got_dict.get(info_field))
|
|
||||||
elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
|
|
||||||
got = got_dict.get(info_field)
|
|
||||||
self.assertTrue(
|
|
||||||
isinstance(got, list),
|
|
||||||
'Expected field %s to be a list, but it is of type %s' % (
|
|
||||||
info_field, type(got).__name__))
|
|
||||||
expected_num = int(expected.partition(':')[2])
|
|
||||||
assertGreaterEqual(
|
|
||||||
self, len(got), expected_num,
|
|
||||||
'Expected %d items in field %s, but only got %d' % (
|
|
||||||
expected_num, info_field, len(got)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
got = got_dict.get(info_field)
|
|
||||||
self.assertEqual(expected, got,
|
|
||||||
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
|
|
||||||
|
|
||||||
|
def expect_info_dict(self, got_dict, expected_dict):
|
||||||
|
expect_dict(self, got_dict, expected_dict)
|
||||||
# Check for the presence of mandatory fields
|
# Check for the presence of mandatory fields
|
||||||
if got_dict.get('_type') not in ('playlist', 'multi_video'):
|
if got_dict.get('_type') not in ('playlist', 'multi_video'):
|
||||||
for key in ('id', 'url', 'title', 'ext'):
|
for key in ('id', 'url', 'title', 'ext'):
|
||||||
@@ -160,7 +186,7 @@ def expect_info_dict(self, got_dict, expected_dict):
|
|||||||
# Are checkable fields missing from the test case definition?
|
# Are checkable fields missing from the test case definition?
|
||||||
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
|
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
|
||||||
for key, value in got_dict.items()
|
for key, value in got_dict.items()
|
||||||
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
|
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
|
||||||
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
|
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
|
||||||
if missing_keys:
|
if missing_keys:
|
||||||
def _repr(v):
|
def _repr(v):
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|||||||
from test.helper import FakeYDL
|
from test.helper import FakeYDL
|
||||||
from youtube_dl.extractor.common import InfoExtractor
|
from youtube_dl.extractor.common import InfoExtractor
|
||||||
from youtube_dl.extractor import YoutubeIE, get_info_extractor
|
from youtube_dl.extractor import YoutubeIE, get_info_extractor
|
||||||
|
from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError
|
||||||
|
|
||||||
|
|
||||||
class TestIE(InfoExtractor):
|
class TestIE(InfoExtractor):
|
||||||
@@ -35,10 +36,18 @@ class TestInfoExtractor(unittest.TestCase):
|
|||||||
<meta name="og:title" content='Foo'/>
|
<meta name="og:title" content='Foo'/>
|
||||||
<meta content="Some video's description " name="og:description"/>
|
<meta content="Some video's description " name="og:description"/>
|
||||||
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
|
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
|
||||||
|
<meta content='application/x-shockwave-flash' property='og:video:type'>
|
||||||
|
<meta content='Foo' property=og:foobar>
|
||||||
|
<meta name="og:test1" content='foo > < bar'/>
|
||||||
|
<meta name="og:test2" content="foo >//< bar"/>
|
||||||
'''
|
'''
|
||||||
self.assertEqual(ie._og_search_title(html), 'Foo')
|
self.assertEqual(ie._og_search_title(html), 'Foo')
|
||||||
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
|
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
|
||||||
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
|
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
|
||||||
|
self.assertEqual(ie._og_search_video_url(html, default=None), None)
|
||||||
|
self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
|
||||||
|
self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
|
||||||
|
self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
|
||||||
|
|
||||||
def test_html_search_meta(self):
|
def test_html_search_meta(self):
|
||||||
ie = self.ie
|
ie = self.ie
|
||||||
@@ -58,5 +67,14 @@ class TestInfoExtractor(unittest.TestCase):
|
|||||||
self.assertEqual(ie._html_search_meta('e', html), '5')
|
self.assertEqual(ie._html_search_meta('e', html), '5')
|
||||||
self.assertEqual(ie._html_search_meta('f', html), '6')
|
self.assertEqual(ie._html_search_meta('f', html), '6')
|
||||||
|
|
||||||
|
def test_download_json(self):
|
||||||
|
uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
|
||||||
|
self.assertEqual(self.ie._download_json(uri, None), {'foo': 'blah'})
|
||||||
|
uri = encode_data_uri(b'callback({"foo": "blah"})', 'application/javascript')
|
||||||
|
self.assertEqual(self.ie._download_json(uri, None, transform_source=strip_jsonp), {'foo': 'blah'})
|
||||||
|
uri = encode_data_uri(b'{"foo": invalid}', 'application/json')
|
||||||
|
self.assertRaises(ExtractorError, self.ie._download_json, uri, None)
|
||||||
|
self.assertEqual(self.ie._download_json(uri, None, fatal=False), None)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -12,10 +12,11 @@ import copy
|
|||||||
|
|
||||||
from test.helper import FakeYDL, assertRegexpMatches
|
from test.helper import FakeYDL, assertRegexpMatches
|
||||||
from youtube_dl import YoutubeDL
|
from youtube_dl import YoutubeDL
|
||||||
from youtube_dl.compat import compat_str
|
from youtube_dl.compat import compat_str, compat_urllib_error
|
||||||
from youtube_dl.extractor import YoutubeIE
|
from youtube_dl.extractor import YoutubeIE
|
||||||
|
from youtube_dl.extractor.common import InfoExtractor
|
||||||
from youtube_dl.postprocessor.common import PostProcessor
|
from youtube_dl.postprocessor.common import PostProcessor
|
||||||
from youtube_dl.utils import match_filter_func
|
from youtube_dl.utils import ExtractorError, match_filter_func
|
||||||
|
|
||||||
TEST_URL = 'http://localhost/sample.mp4'
|
TEST_URL = 'http://localhost/sample.mp4'
|
||||||
|
|
||||||
@@ -105,6 +106,7 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
def test_format_selection(self):
|
def test_format_selection(self):
|
||||||
formats = [
|
formats = [
|
||||||
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
|
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
|
||||||
|
{'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL},
|
||||||
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
|
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
|
||||||
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
|
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
|
||||||
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
|
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
|
||||||
@@ -136,6 +138,11 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], '35')
|
self.assertEqual(downloaded['format_id'], '35')
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'example-with-dashes'})
|
||||||
|
ydl.process_ie_result(info_dict.copy())
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], 'example-with-dashes')
|
||||||
|
|
||||||
def test_format_selection_audio(self):
|
def test_format_selection_audio(self):
|
||||||
formats = [
|
formats = [
|
||||||
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
|
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
|
||||||
@@ -215,9 +222,24 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], 'dash-video-low')
|
self.assertEqual(downloaded['format_id'], 'dash-video-low')
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'bestvideo[format_id^=dash][format_id$=low]'})
|
||||||
|
ydl.process_ie_result(info_dict.copy())
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], 'dash-video-low')
|
||||||
|
|
||||||
|
formats = [
|
||||||
|
{'format_id': 'vid-vcodec-dot', 'ext': 'mp4', 'preference': 1, 'vcodec': 'avc1.123456', 'acodec': 'none', 'url': TEST_URL},
|
||||||
|
]
|
||||||
|
info_dict = _make_result(formats)
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'bestvideo[vcodec=avc1.123456]'})
|
||||||
|
ydl.process_ie_result(info_dict.copy())
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], 'vid-vcodec-dot')
|
||||||
|
|
||||||
def test_youtube_format_selection(self):
|
def test_youtube_format_selection(self):
|
||||||
order = [
|
order = [
|
||||||
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
|
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13',
|
||||||
# Apple HTTP Live Streaming
|
# Apple HTTP Live Streaming
|
||||||
'96', '95', '94', '93', '92', '132', '151',
|
'96', '95', '94', '93', '92', '132', '151',
|
||||||
# 3D
|
# 3D
|
||||||
@@ -229,21 +251,81 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
'141', '172', '140', '171', '139',
|
'141', '172', '140', '171', '139',
|
||||||
]
|
]
|
||||||
|
|
||||||
for f1id, f2id in zip(order, order[1:]):
|
def format_info(f_id):
|
||||||
f1 = YoutubeIE._formats[f1id].copy()
|
info = YoutubeIE._formats[f_id].copy()
|
||||||
f1['format_id'] = f1id
|
|
||||||
f1['url'] = 'url:' + f1id
|
|
||||||
f2 = YoutubeIE._formats[f2id].copy()
|
|
||||||
f2['format_id'] = f2id
|
|
||||||
f2['url'] = 'url:' + f2id
|
|
||||||
|
|
||||||
|
# XXX: In real cases InfoExtractor._parse_mpd_formats() fills up 'acodec'
|
||||||
|
# and 'vcodec', while in tests such information is incomplete since
|
||||||
|
# commit a6c2c24479e5f4827ceb06f64d855329c0a6f593
|
||||||
|
# test_YoutubeDL.test_youtube_format_selection is broken without
|
||||||
|
# this fix
|
||||||
|
if 'acodec' in info and 'vcodec' not in info:
|
||||||
|
info['vcodec'] = 'none'
|
||||||
|
elif 'vcodec' in info and 'acodec' not in info:
|
||||||
|
info['acodec'] = 'none'
|
||||||
|
|
||||||
|
info['format_id'] = f_id
|
||||||
|
info['url'] = 'url:' + f_id
|
||||||
|
return info
|
||||||
|
formats_order = [format_info(f_id) for f_id in order]
|
||||||
|
|
||||||
|
info_dict = _make_result(list(formats_order), extractor='youtube')
|
||||||
|
ydl = YDL({'format': 'bestvideo+bestaudio'})
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], '137+141')
|
||||||
|
self.assertEqual(downloaded['ext'], 'mp4')
|
||||||
|
|
||||||
|
info_dict = _make_result(list(formats_order), extractor='youtube')
|
||||||
|
ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'})
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], '38')
|
||||||
|
|
||||||
|
info_dict = _make_result(list(formats_order), extractor='youtube')
|
||||||
|
ydl = YDL({'format': 'bestvideo/best,bestaudio'})
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
|
||||||
|
self.assertEqual(downloaded_ids, ['137', '141'])
|
||||||
|
|
||||||
|
info_dict = _make_result(list(formats_order), extractor='youtube')
|
||||||
|
ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'})
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
|
||||||
|
self.assertEqual(downloaded_ids, ['137+141', '248+141'])
|
||||||
|
|
||||||
|
info_dict = _make_result(list(formats_order), extractor='youtube')
|
||||||
|
ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'})
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
|
||||||
|
self.assertEqual(downloaded_ids, ['136+141', '247+141'])
|
||||||
|
|
||||||
|
info_dict = _make_result(list(formats_order), extractor='youtube')
|
||||||
|
ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'})
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
|
||||||
|
self.assertEqual(downloaded_ids, ['248+141'])
|
||||||
|
|
||||||
|
for f1, f2 in zip(formats_order, formats_order[1:]):
|
||||||
info_dict = _make_result([f1, f2], extractor='youtube')
|
info_dict = _make_result([f1, f2], extractor='youtube')
|
||||||
ydl = YDL({'format': 'best/bestvideo'})
|
ydl = YDL({'format': 'best/bestvideo'})
|
||||||
yie = YoutubeIE(ydl)
|
yie = YoutubeIE(ydl)
|
||||||
yie._sort_formats(info_dict['formats'])
|
yie._sort_formats(info_dict['formats'])
|
||||||
ydl.process_ie_result(info_dict)
|
ydl.process_ie_result(info_dict)
|
||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], f1id)
|
self.assertEqual(downloaded['format_id'], f1['format_id'])
|
||||||
|
|
||||||
info_dict = _make_result([f2, f1], extractor='youtube')
|
info_dict = _make_result([f2, f1], extractor='youtube')
|
||||||
ydl = YDL({'format': 'best/bestvideo'})
|
ydl = YDL({'format': 'best/bestvideo'})
|
||||||
@@ -251,7 +333,18 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
yie._sort_formats(info_dict['formats'])
|
yie._sort_formats(info_dict['formats'])
|
||||||
ydl.process_ie_result(info_dict)
|
ydl.process_ie_result(info_dict)
|
||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], f1id)
|
self.assertEqual(downloaded['format_id'], f1['format_id'])
|
||||||
|
|
||||||
|
def test_invalid_format_specs(self):
|
||||||
|
def assert_syntax_error(format_spec):
|
||||||
|
ydl = YDL({'format': format_spec})
|
||||||
|
info_dict = _make_result([{'format_id': 'foo', 'url': TEST_URL}])
|
||||||
|
self.assertRaises(SyntaxError, ydl.process_ie_result, info_dict)
|
||||||
|
|
||||||
|
assert_syntax_error('bestvideo,,best')
|
||||||
|
assert_syntax_error('+bestaudio')
|
||||||
|
assert_syntax_error('bestvideo+')
|
||||||
|
assert_syntax_error('/')
|
||||||
|
|
||||||
def test_format_filtering(self):
|
def test_format_filtering(self):
|
||||||
formats = [
|
formats = [
|
||||||
@@ -308,6 +401,18 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], 'G')
|
self.assertEqual(downloaded['format_id'], 'G')
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'all[width>=400][width<=600]'})
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
|
||||||
|
self.assertEqual(downloaded_ids, ['B', 'C', 'D'])
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'best[height<40]'})
|
||||||
|
try:
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
except ExtractorError:
|
||||||
|
pass
|
||||||
|
self.assertEqual(ydl.downloaded_info_dicts, [])
|
||||||
|
|
||||||
|
|
||||||
class TestYoutubeDL(unittest.TestCase):
|
class TestYoutubeDL(unittest.TestCase):
|
||||||
def test_subtitles(self):
|
def test_subtitles(self):
|
||||||
@@ -402,6 +507,9 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
assertRegexpMatches(self, ydl._format_note({
|
assertRegexpMatches(self, ydl._format_note({
|
||||||
'vbr': 10,
|
'vbr': 10,
|
||||||
}), '^\s*10k$')
|
}), '^\s*10k$')
|
||||||
|
assertRegexpMatches(self, ydl._format_note({
|
||||||
|
'fps': 30,
|
||||||
|
}), '^30fps$')
|
||||||
|
|
||||||
def test_postprocessors(self):
|
def test_postprocessors(self):
|
||||||
filename = 'post-processor-testfile.mp4'
|
filename = 'post-processor-testfile.mp4'
|
||||||
@@ -553,6 +661,47 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
result = get_ids({'playlist_items': '10'})
|
result = get_ids({'playlist_items': '10'})
|
||||||
self.assertEqual(result, [])
|
self.assertEqual(result, [])
|
||||||
|
|
||||||
|
def test_urlopen_no_file_protocol(self):
|
||||||
|
# see https://github.com/rg3/youtube-dl/issues/8227
|
||||||
|
ydl = YDL()
|
||||||
|
self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
|
||||||
|
|
||||||
|
def test_do_not_override_ie_key_in_url_transparent(self):
|
||||||
|
ydl = YDL()
|
||||||
|
|
||||||
|
class Foo1IE(InfoExtractor):
|
||||||
|
_VALID_URL = r'foo1:'
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': 'foo2:',
|
||||||
|
'ie_key': 'Foo2',
|
||||||
|
}
|
||||||
|
|
||||||
|
class Foo2IE(InfoExtractor):
|
||||||
|
_VALID_URL = r'foo2:'
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
return {
|
||||||
|
'_type': 'url',
|
||||||
|
'url': 'foo3:',
|
||||||
|
'ie_key': 'Foo3',
|
||||||
|
}
|
||||||
|
|
||||||
|
class Foo3IE(InfoExtractor):
|
||||||
|
_VALID_URL = r'foo3:'
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
return _make_result([{'url': TEST_URL}])
|
||||||
|
|
||||||
|
ydl.add_info_extractor(Foo1IE(ydl))
|
||||||
|
ydl.add_info_extractor(Foo2IE(ydl))
|
||||||
|
ydl.add_info_extractor(Foo3IE(ydl))
|
||||||
|
ydl.extract_info('foo1:')
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['url'], TEST_URL)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ class TestAllURLsMatching(unittest.TestCase):
|
|||||||
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
|
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
|
||||||
|
|
||||||
def test_youtube_user_matching(self):
|
def test_youtube_user_matching(self):
|
||||||
self.assertMatch('www.youtube.com/NASAgovVideo/videos', ['youtube:user'])
|
self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:user'])
|
||||||
|
|
||||||
def test_youtube_feeds(self):
|
def test_youtube_feeds(self):
|
||||||
self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watchlater'])
|
self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watchlater'])
|
||||||
@@ -121,8 +121,8 @@ class TestAllURLsMatching(unittest.TestCase):
|
|||||||
|
|
||||||
def test_pbs(self):
|
def test_pbs(self):
|
||||||
# https://github.com/rg3/youtube-dl/issues/2350
|
# https://github.com/rg3/youtube-dl/issues/2350
|
||||||
self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
|
self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs'])
|
||||||
self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
|
self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs'])
|
||||||
|
|
||||||
def test_yahoo_https(self):
|
def test_yahoo_https(self):
|
||||||
# https://github.com/rg3/youtube-dl/issues/2701
|
# https://github.com/rg3/youtube-dl/issues/2701
|
||||||
|
|||||||
@@ -10,29 +10,39 @@ import unittest
|
|||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
|
||||||
from youtube_dl.utils import get_filesystem_encoding
|
|
||||||
from youtube_dl.compat import (
|
from youtube_dl.compat import (
|
||||||
compat_getenv,
|
compat_getenv,
|
||||||
|
compat_setenv,
|
||||||
|
compat_etree_fromstring,
|
||||||
compat_expanduser,
|
compat_expanduser,
|
||||||
|
compat_shlex_split,
|
||||||
|
compat_str,
|
||||||
|
compat_struct_unpack,
|
||||||
|
compat_urllib_parse_unquote,
|
||||||
|
compat_urllib_parse_unquote_plus,
|
||||||
|
compat_urllib_parse_urlencode,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class TestCompat(unittest.TestCase):
|
class TestCompat(unittest.TestCase):
|
||||||
def test_compat_getenv(self):
|
def test_compat_getenv(self):
|
||||||
test_str = 'тест'
|
test_str = 'тест'
|
||||||
os.environ['YOUTUBE-DL-TEST'] = (
|
compat_setenv('YOUTUBE-DL-TEST', test_str)
|
||||||
test_str if sys.version_info >= (3, 0)
|
|
||||||
else test_str.encode(get_filesystem_encoding()))
|
|
||||||
self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
|
self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
|
||||||
|
|
||||||
|
def test_compat_setenv(self):
|
||||||
|
test_var = 'YOUTUBE-DL-TEST'
|
||||||
|
test_str = 'тест'
|
||||||
|
compat_setenv(test_var, test_str)
|
||||||
|
compat_getenv(test_var)
|
||||||
|
self.assertEqual(compat_getenv(test_var), test_str)
|
||||||
|
|
||||||
def test_compat_expanduser(self):
|
def test_compat_expanduser(self):
|
||||||
old_home = os.environ.get('HOME')
|
old_home = os.environ.get('HOME')
|
||||||
test_str = 'C:\Documents and Settings\тест\Application Data'
|
test_str = 'C:\Documents and Settings\тест\Application Data'
|
||||||
os.environ['HOME'] = (
|
compat_setenv('HOME', test_str)
|
||||||
test_str if sys.version_info >= (3, 0)
|
|
||||||
else test_str.encode(get_filesystem_encoding()))
|
|
||||||
self.assertEqual(compat_expanduser('~'), test_str)
|
self.assertEqual(compat_expanduser('~'), test_str)
|
||||||
os.environ['HOME'] = old_home
|
compat_setenv('HOME', old_home or '')
|
||||||
|
|
||||||
def test_all_present(self):
|
def test_all_present(self):
|
||||||
import youtube_dl.compat
|
import youtube_dl.compat
|
||||||
@@ -42,5 +52,66 @@ class TestCompat(unittest.TestCase):
|
|||||||
dir(youtube_dl.compat))) - set(['unicode_literals'])
|
dir(youtube_dl.compat))) - set(['unicode_literals'])
|
||||||
self.assertEqual(all_names, sorted(present_names))
|
self.assertEqual(all_names, sorted(present_names))
|
||||||
|
|
||||||
|
def test_compat_urllib_parse_unquote(self):
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote(''), '')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%'), '%')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%%'), '%%')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%2F'), '/')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%2f'), '/')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波')
|
||||||
|
self.assertEqual(
|
||||||
|
compat_urllib_parse_unquote('''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%25%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
|
||||||
|
%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a'''),
|
||||||
|
'''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
|
||||||
|
%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''')
|
||||||
|
self.assertEqual(
|
||||||
|
compat_urllib_parse_unquote('''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''),
|
||||||
|
'''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
|
||||||
|
|
||||||
|
def test_compat_urllib_parse_unquote_plus(self):
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
|
||||||
|
self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
|
||||||
|
|
||||||
|
def test_compat_urllib_parse_urlencode(self):
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
|
||||||
|
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')
|
||||||
|
|
||||||
|
def test_compat_shlex_split(self):
|
||||||
|
self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
|
||||||
|
|
||||||
|
def test_compat_etree_fromstring(self):
|
||||||
|
xml = '''
|
||||||
|
<root foo="bar" spam="中文">
|
||||||
|
<normal>foo</normal>
|
||||||
|
<chinese>中文</chinese>
|
||||||
|
<foo><bar>spam</bar></foo>
|
||||||
|
</root>
|
||||||
|
'''
|
||||||
|
doc = compat_etree_fromstring(xml.encode('utf-8'))
|
||||||
|
self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
|
||||||
|
self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
|
||||||
|
self.assertTrue(isinstance(doc.find('normal').text, compat_str))
|
||||||
|
self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
|
||||||
|
self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
|
||||||
|
|
||||||
|
def test_compat_etree_fromstring_doctype(self):
|
||||||
|
xml = '''<?xml version="1.0"?>
|
||||||
|
<!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd">
|
||||||
|
<smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>'''
|
||||||
|
compat_etree_fromstring(xml)
|
||||||
|
|
||||||
|
def test_struct_unpack(self):
|
||||||
|
self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ def generator(test_case):
|
|||||||
|
|
||||||
params = get_params(test_case.get('params', {}))
|
params = get_params(test_case.get('params', {}))
|
||||||
if is_playlist and 'playlist' not in test_case:
|
if is_playlist and 'playlist' not in test_case:
|
||||||
params.setdefault('extract_flat', True)
|
params.setdefault('extract_flat', 'in_playlist')
|
||||||
params.setdefault('skip_download', True)
|
params.setdefault('skip_download', True)
|
||||||
|
|
||||||
ydl = YoutubeDL(params, auto_init=False)
|
ydl = YoutubeDL(params, auto_init=False)
|
||||||
@@ -136,7 +136,9 @@ def generator(test_case):
|
|||||||
# We're not using .download here sine that is just a shim
|
# We're not using .download here sine that is just a shim
|
||||||
# for outside error handling, and returns the exit code
|
# for outside error handling, and returns the exit code
|
||||||
# instead of the result dict.
|
# instead of the result dict.
|
||||||
res_dict = ydl.extract_info(test_case['url'])
|
res_dict = ydl.extract_info(
|
||||||
|
test_case['url'],
|
||||||
|
force_generic_extractor=params.get('force_generic_extractor', False))
|
||||||
except (DownloadError, ExtractorError) as err:
|
except (DownloadError, ExtractorError) as err:
|
||||||
# Check if the exception is not a network related one
|
# Check if the exception is not a network related one
|
||||||
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
|
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
# Allow direct execution
|
# Allow direct execution
|
||||||
@@ -52,7 +53,12 @@ class TestHTTP(unittest.TestCase):
|
|||||||
('localhost', 0), HTTPTestRequestHandler)
|
('localhost', 0), HTTPTestRequestHandler)
|
||||||
self.httpd.socket = ssl.wrap_socket(
|
self.httpd.socket = ssl.wrap_socket(
|
||||||
self.httpd.socket, certfile=certfn, server_side=True)
|
self.httpd.socket, certfile=certfn, server_side=True)
|
||||||
self.port = self.httpd.socket.getsockname()[1]
|
if os.name == 'java':
|
||||||
|
# In Jython SSLSocket is not a subclass of socket.socket
|
||||||
|
sock = self.httpd.socket.sock
|
||||||
|
else:
|
||||||
|
sock = self.httpd.socket
|
||||||
|
self.port = sock.getsockname()[1]
|
||||||
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
||||||
self.server_thread.daemon = True
|
self.server_thread.daemon = True
|
||||||
self.server_thread.start()
|
self.server_thread.start()
|
||||||
@@ -115,5 +121,14 @@ class TestProxy(unittest.TestCase):
|
|||||||
response = ydl.urlopen(req).read().decode('utf-8')
|
response = ydl.urlopen(req).read().decode('utf-8')
|
||||||
self.assertEqual(response, 'cn: {0}'.format(url))
|
self.assertEqual(response, 'cn: {0}'.format(url))
|
||||||
|
|
||||||
|
def test_proxy_with_idn(self):
|
||||||
|
ydl = YoutubeDL({
|
||||||
|
'proxy': 'localhost:{0}'.format(self.port),
|
||||||
|
})
|
||||||
|
url = 'http://中文.tw/'
|
||||||
|
response = ydl.urlopen(url).read().decode('utf-8')
|
||||||
|
# b'xn--fiq228c' is '中文'.encode('idna')
|
||||||
|
self.assertEqual(response, 'normal: http://xn--fiq228c.tw/')
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
47
test/test_iqiyi_sdk_interpreter.py
Normal file
47
test/test_iqiyi_sdk_interpreter.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import unittest
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from test.helper import FakeYDL
|
||||||
|
from youtube_dl.extractor import IqiyiIE
|
||||||
|
|
||||||
|
|
||||||
|
class IqiyiIEWithCredentials(IqiyiIE):
|
||||||
|
def _get_login_info(self):
|
||||||
|
return 'foo', 'bar'
|
||||||
|
|
||||||
|
|
||||||
|
class WarningLogger(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.messages = []
|
||||||
|
|
||||||
|
def warning(self, msg):
|
||||||
|
self.messages.append(msg)
|
||||||
|
|
||||||
|
def debug(self, msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def error(self, msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class TestIqiyiSDKInterpreter(unittest.TestCase):
|
||||||
|
def test_iqiyi_sdk_interpreter(self):
|
||||||
|
'''
|
||||||
|
Test the functionality of IqiyiSDKInterpreter by trying to log in
|
||||||
|
|
||||||
|
If `sign` is incorrect, /validate call throws an HTTP 556 error
|
||||||
|
'''
|
||||||
|
logger = WarningLogger()
|
||||||
|
ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
|
||||||
|
ie._login()
|
||||||
|
self.assertTrue('unable to log in:' in logger.messages[0])
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
@@ -19,6 +19,9 @@ class TestJSInterpreter(unittest.TestCase):
|
|||||||
jsi = JSInterpreter('function x3(){return 42;}')
|
jsi = JSInterpreter('function x3(){return 42;}')
|
||||||
self.assertEqual(jsi.call_function('x3'), 42)
|
self.assertEqual(jsi.call_function('x3'), 42)
|
||||||
|
|
||||||
|
jsi = JSInterpreter('var x5 = function(){return 42;}')
|
||||||
|
self.assertEqual(jsi.call_function('x5'), 42)
|
||||||
|
|
||||||
def test_calc(self):
|
def test_calc(self):
|
||||||
jsi = JSInterpreter('function x4(a){return 2*a+1;}')
|
jsi = JSInterpreter('function x4(a){return 2*a+1;}')
|
||||||
self.assertEqual(jsi.call_function('x4', 3), 7)
|
self.assertEqual(jsi.call_function('x4', 3), 7)
|
||||||
|
|||||||
118
test/test_socks.py
Normal file
118
test/test_socks.py
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import unittest
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
import random
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from test.helper import (
|
||||||
|
FakeYDL,
|
||||||
|
get_params,
|
||||||
|
)
|
||||||
|
from youtube_dl.compat import (
|
||||||
|
compat_str,
|
||||||
|
compat_urllib_request,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestMultipleSocks(unittest.TestCase):
|
||||||
|
@staticmethod
|
||||||
|
def _check_params(attrs):
|
||||||
|
params = get_params()
|
||||||
|
for attr in attrs:
|
||||||
|
if attr not in params:
|
||||||
|
print('Missing %s. Skipping.' % attr)
|
||||||
|
return
|
||||||
|
return params
|
||||||
|
|
||||||
|
def test_proxy_http(self):
|
||||||
|
params = self._check_params(['primary_proxy', 'primary_server_ip'])
|
||||||
|
if params is None:
|
||||||
|
return
|
||||||
|
ydl = FakeYDL({
|
||||||
|
'proxy': params['primary_proxy']
|
||||||
|
})
|
||||||
|
self.assertEqual(
|
||||||
|
ydl.urlopen('http://yt-dl.org/ip').read().decode('utf-8'),
|
||||||
|
params['primary_server_ip'])
|
||||||
|
|
||||||
|
def test_proxy_https(self):
|
||||||
|
params = self._check_params(['primary_proxy', 'primary_server_ip'])
|
||||||
|
if params is None:
|
||||||
|
return
|
||||||
|
ydl = FakeYDL({
|
||||||
|
'proxy': params['primary_proxy']
|
||||||
|
})
|
||||||
|
self.assertEqual(
|
||||||
|
ydl.urlopen('https://yt-dl.org/ip').read().decode('utf-8'),
|
||||||
|
params['primary_server_ip'])
|
||||||
|
|
||||||
|
def test_secondary_proxy_http(self):
|
||||||
|
params = self._check_params(['secondary_proxy', 'secondary_server_ip'])
|
||||||
|
if params is None:
|
||||||
|
return
|
||||||
|
ydl = FakeYDL()
|
||||||
|
req = compat_urllib_request.Request('http://yt-dl.org/ip')
|
||||||
|
req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
|
||||||
|
self.assertEqual(
|
||||||
|
ydl.urlopen(req).read().decode('utf-8'),
|
||||||
|
params['secondary_server_ip'])
|
||||||
|
|
||||||
|
def test_secondary_proxy_https(self):
|
||||||
|
params = self._check_params(['secondary_proxy', 'secondary_server_ip'])
|
||||||
|
if params is None:
|
||||||
|
return
|
||||||
|
ydl = FakeYDL()
|
||||||
|
req = compat_urllib_request.Request('https://yt-dl.org/ip')
|
||||||
|
req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
|
||||||
|
self.assertEqual(
|
||||||
|
ydl.urlopen(req).read().decode('utf-8'),
|
||||||
|
params['secondary_server_ip'])
|
||||||
|
|
||||||
|
|
||||||
|
class TestSocks(unittest.TestCase):
|
||||||
|
_SKIP_SOCKS_TEST = True
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
if self._SKIP_SOCKS_TEST:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.port = random.randint(20000, 30000)
|
||||||
|
self.server_process = subprocess.Popen([
|
||||||
|
'srelay', '-f', '-i', '127.0.0.1:%d' % self.port],
|
||||||
|
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
if self._SKIP_SOCKS_TEST:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.server_process.terminate()
|
||||||
|
self.server_process.communicate()
|
||||||
|
|
||||||
|
def _get_ip(self, protocol):
|
||||||
|
if self._SKIP_SOCKS_TEST:
|
||||||
|
return '127.0.0.1'
|
||||||
|
|
||||||
|
ydl = FakeYDL({
|
||||||
|
'proxy': '%s://127.0.0.1:%d' % (protocol, self.port),
|
||||||
|
})
|
||||||
|
return ydl.urlopen('http://yt-dl.org/ip').read().decode('utf-8')
|
||||||
|
|
||||||
|
def test_socks4(self):
|
||||||
|
self.assertTrue(isinstance(self._get_ip('socks4'), compat_str))
|
||||||
|
|
||||||
|
def test_socks4a(self):
|
||||||
|
self.assertTrue(isinstance(self._get_ip('socks4a'), compat_str))
|
||||||
|
|
||||||
|
def test_socks5(self):
|
||||||
|
self.assertTrue(isinstance(self._get_ip('socks5'), compat_str))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
@@ -11,7 +11,6 @@ from test.helper import FakeYDL, md5
|
|||||||
|
|
||||||
|
|
||||||
from youtube_dl.extractor import (
|
from youtube_dl.extractor import (
|
||||||
BlipTVIE,
|
|
||||||
YoutubeIE,
|
YoutubeIE,
|
||||||
DailymotionIE,
|
DailymotionIE,
|
||||||
TEDIE,
|
TEDIE,
|
||||||
@@ -22,11 +21,13 @@ from youtube_dl.extractor import (
|
|||||||
NPOIE,
|
NPOIE,
|
||||||
ComedyCentralIE,
|
ComedyCentralIE,
|
||||||
NRKTVIE,
|
NRKTVIE,
|
||||||
RaiIE,
|
RaiTVIE,
|
||||||
VikiIE,
|
VikiIE,
|
||||||
ThePlatformIE,
|
ThePlatformIE,
|
||||||
|
ThePlatformFeedIE,
|
||||||
RTVEALaCartaIE,
|
RTVEALaCartaIE,
|
||||||
FunnyOrDieIE,
|
FunnyOrDieIE,
|
||||||
|
DemocracynowIE,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -64,16 +65,16 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
|
|||||||
self.DL.params['allsubtitles'] = True
|
self.DL.params['allsubtitles'] = True
|
||||||
subtitles = self.getSubtitles()
|
subtitles = self.getSubtitles()
|
||||||
self.assertEqual(len(subtitles.keys()), 13)
|
self.assertEqual(len(subtitles.keys()), 13)
|
||||||
self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
|
self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
|
||||||
self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
|
self.assertEqual(md5(subtitles['it']), '6d752b98c31f1cf8d597050c7a2cb4b5')
|
||||||
for lang in ['it', 'fr', 'de']:
|
for lang in ['fr', 'de']:
|
||||||
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
||||||
|
|
||||||
def test_youtube_subtitles_sbv_format(self):
|
def test_youtube_subtitles_ttml_format(self):
|
||||||
self.DL.params['writesubtitles'] = True
|
self.DL.params['writesubtitles'] = True
|
||||||
self.DL.params['subtitlesformat'] = 'sbv'
|
self.DL.params['subtitlesformat'] = 'ttml'
|
||||||
subtitles = self.getSubtitles()
|
subtitles = self.getSubtitles()
|
||||||
self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')
|
self.assertEqual(md5(subtitles['en']), 'e306f8c42842f723447d9f63ad65df54')
|
||||||
|
|
||||||
def test_youtube_subtitles_vtt_format(self):
|
def test_youtube_subtitles_vtt_format(self):
|
||||||
self.DL.params['writesubtitles'] = True
|
self.DL.params['writesubtitles'] = True
|
||||||
@@ -143,18 +144,6 @@ class TestTedSubtitles(BaseTestSubtitles):
|
|||||||
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
||||||
|
|
||||||
|
|
||||||
class TestBlipTVSubtitles(BaseTestSubtitles):
|
|
||||||
url = 'http://blip.tv/a/a-6603250'
|
|
||||||
IE = BlipTVIE
|
|
||||||
|
|
||||||
def test_allsubtitles(self):
|
|
||||||
self.DL.params['writesubtitles'] = True
|
|
||||||
self.DL.params['allsubtitles'] = True
|
|
||||||
subtitles = self.getSubtitles()
|
|
||||||
self.assertEqual(set(subtitles.keys()), set(['en']))
|
|
||||||
self.assertEqual(md5(subtitles['en']), '5b75c300af65fe4476dff79478bb93e4')
|
|
||||||
|
|
||||||
|
|
||||||
class TestVimeoSubtitles(BaseTestSubtitles):
|
class TestVimeoSubtitles(BaseTestSubtitles):
|
||||||
url = 'http://vimeo.com/76979871'
|
url = 'http://vimeo.com/76979871'
|
||||||
IE = VimeoIE
|
IE = VimeoIE
|
||||||
@@ -271,7 +260,7 @@ class TestNRKSubtitles(BaseTestSubtitles):
|
|||||||
|
|
||||||
class TestRaiSubtitles(BaseTestSubtitles):
|
class TestRaiSubtitles(BaseTestSubtitles):
|
||||||
url = 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
|
url = 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
|
||||||
IE = RaiIE
|
IE = RaiTVIE
|
||||||
|
|
||||||
def test_allsubtitles(self):
|
def test_allsubtitles(self):
|
||||||
self.DL.params['writesubtitles'] = True
|
self.DL.params['writesubtitles'] = True
|
||||||
@@ -307,6 +296,18 @@ class TestThePlatformSubtitles(BaseTestSubtitles):
|
|||||||
self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
|
self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
|
||||||
|
|
||||||
|
|
||||||
|
class TestThePlatformFeedSubtitles(BaseTestSubtitles):
|
||||||
|
url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
|
||||||
|
IE = ThePlatformFeedIE
|
||||||
|
|
||||||
|
def test_allsubtitles(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['allsubtitles'] = True
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertEqual(set(subtitles.keys()), set(['en']))
|
||||||
|
self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')
|
||||||
|
|
||||||
|
|
||||||
class TestRtveSubtitles(BaseTestSubtitles):
|
class TestRtveSubtitles(BaseTestSubtitles):
|
||||||
url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
|
url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
|
||||||
IE = RTVEALaCartaIE
|
IE = RTVEALaCartaIE
|
||||||
@@ -333,5 +334,25 @@ class TestFunnyOrDieSubtitles(BaseTestSubtitles):
|
|||||||
self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')
|
self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')
|
||||||
|
|
||||||
|
|
||||||
|
class TestDemocracynowSubtitles(BaseTestSubtitles):
|
||||||
|
url = 'http://www.democracynow.org/shows/2015/7/3'
|
||||||
|
IE = DemocracynowIE
|
||||||
|
|
||||||
|
def test_allsubtitles(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['allsubtitles'] = True
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertEqual(set(subtitles.keys()), set(['en']))
|
||||||
|
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
|
||||||
|
|
||||||
|
def test_subtitles_in_page(self):
|
||||||
|
self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['allsubtitles'] = True
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertEqual(set(subtitles.keys()), set(['en']))
|
||||||
|
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
30
test/test_update.py
Normal file
30
test/test_update.py
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import unittest
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
|
||||||
|
import json
|
||||||
|
from youtube_dl.update import rsa_verify
|
||||||
|
|
||||||
|
|
||||||
|
class TestUpdate(unittest.TestCase):
|
||||||
|
def test_rsa_verify(self):
|
||||||
|
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
|
||||||
|
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json'), 'rb') as f:
|
||||||
|
versions_info = f.read().decode()
|
||||||
|
versions_info = json.loads(versions_info)
|
||||||
|
signature = versions_info['signature']
|
||||||
|
del versions_info['signature']
|
||||||
|
self.assertTrue(rsa_verify(
|
||||||
|
json.dumps(versions_info, sort_keys=True).encode('utf-8'),
|
||||||
|
signature, UPDATES_RSA_KEY))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
@@ -18,12 +18,18 @@ import xml.etree.ElementTree
|
|||||||
from youtube_dl.utils import (
|
from youtube_dl.utils import (
|
||||||
age_restricted,
|
age_restricted,
|
||||||
args_to_str,
|
args_to_str,
|
||||||
|
encode_base_n,
|
||||||
clean_html,
|
clean_html,
|
||||||
|
date_from_str,
|
||||||
DateRange,
|
DateRange,
|
||||||
detect_exe_version,
|
detect_exe_version,
|
||||||
|
determine_ext,
|
||||||
|
dict_get,
|
||||||
|
encode_compat_str,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
escape_rfc3986,
|
escape_rfc3986,
|
||||||
escape_url,
|
escape_url,
|
||||||
|
extract_attributes,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
find_xpath_attr,
|
find_xpath_attr,
|
||||||
fix_xml_ampersands,
|
fix_xml_ampersands,
|
||||||
@@ -32,21 +38,25 @@ from youtube_dl.utils import (
|
|||||||
is_html,
|
is_html,
|
||||||
js_to_json,
|
js_to_json,
|
||||||
limit_length,
|
limit_length,
|
||||||
|
ohdave_rsa_encrypt,
|
||||||
OnDemandPagedList,
|
OnDemandPagedList,
|
||||||
orderedSet,
|
orderedSet,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
parse_filesize,
|
parse_filesize,
|
||||||
|
parse_count,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
read_batch_urls,
|
read_batch_urls,
|
||||||
sanitize_filename,
|
sanitize_filename,
|
||||||
sanitize_path,
|
sanitize_path,
|
||||||
prepend_extension,
|
prepend_extension,
|
||||||
replace_extension,
|
replace_extension,
|
||||||
|
remove_start,
|
||||||
|
remove_end,
|
||||||
|
remove_quotes,
|
||||||
shell_quote,
|
shell_quote,
|
||||||
smuggle_url,
|
smuggle_url,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
strip_jsonp,
|
strip_jsonp,
|
||||||
struct_unpack,
|
|
||||||
timeconvert,
|
timeconvert,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
@@ -55,13 +65,25 @@ from youtube_dl.utils import (
|
|||||||
lowercase_escape,
|
lowercase_escape,
|
||||||
url_basename,
|
url_basename,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
|
update_url_query,
|
||||||
version_tuple,
|
version_tuple,
|
||||||
xpath_with_ns,
|
xpath_with_ns,
|
||||||
|
xpath_element,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
|
xpath_attr,
|
||||||
render_table,
|
render_table,
|
||||||
match_str,
|
match_str,
|
||||||
parse_dfxp_time_expr,
|
parse_dfxp_time_expr,
|
||||||
dfxp2srt,
|
dfxp2srt,
|
||||||
|
cli_option,
|
||||||
|
cli_valueless_option,
|
||||||
|
cli_bool_option,
|
||||||
|
)
|
||||||
|
from youtube_dl.compat import (
|
||||||
|
compat_chr,
|
||||||
|
compat_etree_fromstring,
|
||||||
|
compat_urlparse,
|
||||||
|
compat_parse_qs,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -118,8 +140,8 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
|
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
|
||||||
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
|
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
|
||||||
|
|
||||||
tests = 'a\xe4b\u4e2d\u56fd\u7684c'
|
tests = 'aäb\u4e2d\u56fd\u7684c'
|
||||||
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
|
self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
|
||||||
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
|
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
|
||||||
|
|
||||||
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
|
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
|
||||||
@@ -134,6 +156,10 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertTrue(sanitize_filename('-', restricted=True) != '')
|
self.assertTrue(sanitize_filename('-', restricted=True) != '')
|
||||||
self.assertTrue(sanitize_filename(':', restricted=True) != '')
|
self.assertTrue(sanitize_filename(':', restricted=True) != '')
|
||||||
|
|
||||||
|
self.assertEqual(sanitize_filename(
|
||||||
|
'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØŒÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøœùúûüýþÿ', restricted=True),
|
||||||
|
'AAAAAAAECEEEEIIIIDNOOOOOOOEUUUUYPssaaaaaaaeceeeeiiiionoooooooeuuuuypy')
|
||||||
|
|
||||||
def test_sanitize_ids(self):
|
def test_sanitize_ids(self):
|
||||||
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
|
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
|
||||||
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
|
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
|
||||||
@@ -191,6 +217,25 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
|
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
|
||||||
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
|
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
|
||||||
|
|
||||||
|
def test_remove_start(self):
|
||||||
|
self.assertEqual(remove_start(None, 'A - '), None)
|
||||||
|
self.assertEqual(remove_start('A - B', 'A - '), 'B')
|
||||||
|
self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
|
||||||
|
|
||||||
|
def test_remove_end(self):
|
||||||
|
self.assertEqual(remove_end(None, ' - B'), None)
|
||||||
|
self.assertEqual(remove_end('A - B', ' - B'), 'A')
|
||||||
|
self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
|
||||||
|
|
||||||
|
def test_remove_quotes(self):
|
||||||
|
self.assertEqual(remove_quotes(None), None)
|
||||||
|
self.assertEqual(remove_quotes('"'), '"')
|
||||||
|
self.assertEqual(remove_quotes("'"), "'")
|
||||||
|
self.assertEqual(remove_quotes(';'), ';')
|
||||||
|
self.assertEqual(remove_quotes('";'), '";')
|
||||||
|
self.assertEqual(remove_quotes('""'), '')
|
||||||
|
self.assertEqual(remove_quotes('";"'), ';')
|
||||||
|
|
||||||
def test_ordered_set(self):
|
def test_ordered_set(self):
|
||||||
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
|
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
|
||||||
self.assertEqual(orderedSet([]), [])
|
self.assertEqual(orderedSet([]), [])
|
||||||
@@ -202,8 +247,15 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(unescapeHTML('%20;'), '%20;')
|
self.assertEqual(unescapeHTML('%20;'), '%20;')
|
||||||
self.assertEqual(unescapeHTML('/'), '/')
|
self.assertEqual(unescapeHTML('/'), '/')
|
||||||
self.assertEqual(unescapeHTML('/'), '/')
|
self.assertEqual(unescapeHTML('/'), '/')
|
||||||
self.assertEqual(
|
self.assertEqual(unescapeHTML('é'), 'é')
|
||||||
unescapeHTML('é'), 'é')
|
self.assertEqual(unescapeHTML('�'), '�')
|
||||||
|
|
||||||
|
def test_date_from_str(self):
|
||||||
|
self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
|
||||||
|
self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
|
||||||
|
self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
|
||||||
|
self.assertEqual(date_from_str('now+365day'), date_from_str('now+1year'))
|
||||||
|
self.assertEqual(date_from_str('now+30day'), date_from_str('now+1month'))
|
||||||
|
|
||||||
def test_daterange(self):
|
def test_daterange(self):
|
||||||
_20century = DateRange("19000101", "20000101")
|
_20century = DateRange("19000101", "20000101")
|
||||||
@@ -227,7 +279,16 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
|
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
|
||||||
'20150202')
|
'20150202')
|
||||||
|
self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
|
||||||
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
|
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
|
||||||
|
self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
|
||||||
|
|
||||||
|
def test_determine_ext(self):
|
||||||
|
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
|
||||||
|
self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
|
||||||
|
self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
|
||||||
|
self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
|
||||||
|
self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
|
||||||
|
|
||||||
def test_find_xpath_attr(self):
|
def test_find_xpath_attr(self):
|
||||||
testxml = '''<root>
|
testxml = '''<root>
|
||||||
@@ -235,12 +296,21 @@ class TestUtil(unittest.TestCase):
|
|||||||
<node x="a"/>
|
<node x="a"/>
|
||||||
<node x="a" y="c" />
|
<node x="a" y="c" />
|
||||||
<node x="b" y="d" />
|
<node x="b" y="d" />
|
||||||
|
<node x="" />
|
||||||
</root>'''
|
</root>'''
|
||||||
doc = xml.etree.ElementTree.fromstring(testxml)
|
doc = compat_etree_fromstring(testxml)
|
||||||
|
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
|
||||||
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
|
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
|
||||||
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
|
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
|
||||||
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
|
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
|
||||||
|
self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
|
||||||
|
|
||||||
def test_xpath_with_ns(self):
|
def test_xpath_with_ns(self):
|
||||||
testxml = '''<root xmlns:media="http://example.com/">
|
testxml = '''<root xmlns:media="http://example.com/">
|
||||||
@@ -249,23 +319,56 @@ class TestUtil(unittest.TestCase):
|
|||||||
<url>http://server.com/download.mp3</url>
|
<url>http://server.com/download.mp3</url>
|
||||||
</media:song>
|
</media:song>
|
||||||
</root>'''
|
</root>'''
|
||||||
doc = xml.etree.ElementTree.fromstring(testxml)
|
doc = compat_etree_fromstring(testxml)
|
||||||
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
|
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
|
||||||
self.assertTrue(find('media:song') is not None)
|
self.assertTrue(find('media:song') is not None)
|
||||||
self.assertEqual(find('media:song/media:author').text, 'The Author')
|
self.assertEqual(find('media:song/media:author').text, 'The Author')
|
||||||
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
|
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
|
||||||
|
|
||||||
|
def test_xpath_element(self):
|
||||||
|
doc = xml.etree.ElementTree.Element('root')
|
||||||
|
div = xml.etree.ElementTree.SubElement(doc, 'div')
|
||||||
|
p = xml.etree.ElementTree.SubElement(div, 'p')
|
||||||
|
p.text = 'Foo'
|
||||||
|
self.assertEqual(xpath_element(doc, 'div/p'), p)
|
||||||
|
self.assertEqual(xpath_element(doc, ['div/p']), p)
|
||||||
|
self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
|
||||||
|
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
|
||||||
|
self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
|
||||||
|
self.assertTrue(xpath_element(doc, 'div/bar') is None)
|
||||||
|
self.assertTrue(xpath_element(doc, ['div/bar']) is None)
|
||||||
|
self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
|
||||||
|
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
|
||||||
|
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
|
||||||
|
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
|
||||||
|
|
||||||
def test_xpath_text(self):
|
def test_xpath_text(self):
|
||||||
testxml = '''<root>
|
testxml = '''<root>
|
||||||
<div>
|
<div>
|
||||||
<p>Foo</p>
|
<p>Foo</p>
|
||||||
</div>
|
</div>
|
||||||
</root>'''
|
</root>'''
|
||||||
doc = xml.etree.ElementTree.fromstring(testxml)
|
doc = compat_etree_fromstring(testxml)
|
||||||
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
|
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
|
||||||
|
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
|
||||||
self.assertTrue(xpath_text(doc, 'div/bar') is None)
|
self.assertTrue(xpath_text(doc, 'div/bar') is None)
|
||||||
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
|
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
|
||||||
|
|
||||||
|
def test_xpath_attr(self):
|
||||||
|
testxml = '''<root>
|
||||||
|
<div>
|
||||||
|
<p x="a">Foo</p>
|
||||||
|
</div>
|
||||||
|
</root>'''
|
||||||
|
doc = compat_etree_fromstring(testxml)
|
||||||
|
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
|
||||||
|
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
|
||||||
|
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
|
||||||
|
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
|
||||||
|
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
|
||||||
|
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
|
||||||
|
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
|
||||||
|
|
||||||
def test_smuggle_url(self):
|
def test_smuggle_url(self):
|
||||||
data = {"ö": "ö", "abc": [3]}
|
data = {"ö": "ö", "abc": [3]}
|
||||||
url = 'https://foo.bar/baz?x=y#a'
|
url = 'https://foo.bar/baz?x=y#a'
|
||||||
@@ -324,6 +427,8 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(parse_duration('02:03:04'), 7384)
|
self.assertEqual(parse_duration('02:03:04'), 7384)
|
||||||
self.assertEqual(parse_duration('01:02:03:04'), 93784)
|
self.assertEqual(parse_duration('01:02:03:04'), 93784)
|
||||||
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
|
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
|
||||||
|
self.assertEqual(parse_duration('87 Min.'), 5220)
|
||||||
|
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
|
||||||
|
|
||||||
def test_fix_xml_ampersands(self):
|
def test_fix_xml_ampersands(self):
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
@@ -363,9 +468,6 @@ class TestUtil(unittest.TestCase):
|
|||||||
testPL(5, 2, (2, 99), [2, 3, 4])
|
testPL(5, 2, (2, 99), [2, 3, 4])
|
||||||
testPL(5, 2, (20, 99), [])
|
testPL(5, 2, (20, 99), [])
|
||||||
|
|
||||||
def test_struct_unpack(self):
|
|
||||||
self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
|
|
||||||
|
|
||||||
def test_read_batch_urls(self):
|
def test_read_batch_urls(self):
|
||||||
f = io.StringIO('''\xef\xbb\xbf foo
|
f = io.StringIO('''\xef\xbb\xbf foo
|
||||||
bar\r
|
bar\r
|
||||||
@@ -379,11 +481,73 @@ class TestUtil(unittest.TestCase):
|
|||||||
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
|
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
|
||||||
self.assertTrue(isinstance(data, bytes))
|
self.assertTrue(isinstance(data, bytes))
|
||||||
|
|
||||||
|
def test_update_url_query(self):
|
||||||
|
def query_dict(url):
|
||||||
|
return compat_parse_qs(compat_urlparse.urlparse(url).query)
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
|
||||||
|
query_dict('http://example.com/path?quality=HD&format=mp4'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
|
||||||
|
query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
|
||||||
|
query_dict('http://example.com/path?fields=id,formats,subtitles'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
|
||||||
|
query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path?manifest=f4m', {'manifest': []})),
|
||||||
|
query_dict('http://example.com/path'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
|
||||||
|
query_dict('http://example.com/path?system=LINUX'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
|
||||||
|
query_dict('http://example.com/path?fields=id,formats,subtitles'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'width': 1080, 'height': 720})),
|
||||||
|
query_dict('http://example.com/path?width=1080&height=720'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'bitrate': 5020.43})),
|
||||||
|
query_dict('http://example.com/path?bitrate=5020.43'))
|
||||||
|
self.assertEqual(query_dict(update_url_query(
|
||||||
|
'http://example.com/path', {'test': '第二行тест'})),
|
||||||
|
query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
|
||||||
|
|
||||||
|
def test_dict_get(self):
|
||||||
|
FALSE_VALUES = {
|
||||||
|
'none': None,
|
||||||
|
'false': False,
|
||||||
|
'zero': 0,
|
||||||
|
'empty_string': '',
|
||||||
|
'empty_list': [],
|
||||||
|
}
|
||||||
|
d = FALSE_VALUES.copy()
|
||||||
|
d['a'] = 42
|
||||||
|
self.assertEqual(dict_get(d, 'a'), 42)
|
||||||
|
self.assertEqual(dict_get(d, 'b'), None)
|
||||||
|
self.assertEqual(dict_get(d, 'b', 42), 42)
|
||||||
|
self.assertEqual(dict_get(d, ('a', )), 42)
|
||||||
|
self.assertEqual(dict_get(d, ('b', 'a', )), 42)
|
||||||
|
self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
|
||||||
|
self.assertEqual(dict_get(d, ('b', 'c', )), None)
|
||||||
|
self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
|
||||||
|
for key, false_value in FALSE_VALUES.items():
|
||||||
|
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
|
||||||
|
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
|
||||||
|
|
||||||
|
def test_encode_compat_str(self):
|
||||||
|
self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
|
||||||
|
self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
|
||||||
|
|
||||||
def test_parse_iso8601(self):
|
def test_parse_iso8601(self):
|
||||||
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
|
||||||
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
|
||||||
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
|
||||||
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
|
||||||
|
self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
|
||||||
|
self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
|
||||||
|
|
||||||
def test_strip_jsonp(self):
|
def test_strip_jsonp(self):
|
||||||
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
|
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
|
||||||
@@ -394,6 +558,10 @@ class TestUtil(unittest.TestCase):
|
|||||||
d = json.loads(stripped)
|
d = json.loads(stripped)
|
||||||
self.assertEqual(d, {'STATUS': 'OK'})
|
self.assertEqual(d, {'STATUS': 'OK'})
|
||||||
|
|
||||||
|
stripped = strip_jsonp('ps.embedHandler({"status": "success"});')
|
||||||
|
d = json.loads(stripped)
|
||||||
|
self.assertEqual(d, {'status': 'success'})
|
||||||
|
|
||||||
def test_uppercase_escape(self):
|
def test_uppercase_escape(self):
|
||||||
self.assertEqual(uppercase_escape('aä'), 'aä')
|
self.assertEqual(uppercase_escape('aä'), 'aä')
|
||||||
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
|
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
|
||||||
@@ -430,11 +598,11 @@ class TestUtil(unittest.TestCase):
|
|||||||
)
|
)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
escape_url('http://тест.рф/фрагмент'),
|
escape_url('http://тест.рф/фрагмент'),
|
||||||
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
|
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
|
||||||
)
|
)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
escape_url('http://тест.рф/абв?абв=абв#абв'),
|
escape_url('http://тест.рф/абв?абв=абв#абв'),
|
||||||
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
|
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
|
||||||
)
|
)
|
||||||
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||||
|
|
||||||
@@ -454,10 +622,22 @@ class TestUtil(unittest.TestCase):
|
|||||||
"playlist":[{"controls":{"all":null}}]
|
"playlist":[{"controls":{"all":null}}]
|
||||||
}''')
|
}''')
|
||||||
|
|
||||||
|
inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
|
||||||
|
|
||||||
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
|
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
|
||||||
json_code = js_to_json(inp)
|
json_code = js_to_json(inp)
|
||||||
self.assertEqual(json.loads(json_code), json.loads(inp))
|
self.assertEqual(json.loads(json_code), json.loads(inp))
|
||||||
|
|
||||||
|
inp = '''{
|
||||||
|
0:{src:'skipped', type: 'application/dash+xml'},
|
||||||
|
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
|
||||||
|
}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{
|
||||||
|
"0":{"src":"skipped", "type": "application/dash+xml"},
|
||||||
|
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
|
||||||
|
}''')
|
||||||
|
|
||||||
def test_js_to_json_edgecases(self):
|
def test_js_to_json_edgecases(self):
|
||||||
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
||||||
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
||||||
@@ -481,6 +661,65 @@ class TestUtil(unittest.TestCase):
|
|||||||
on = js_to_json('{"abc": "def",}')
|
on = js_to_json('{"abc": "def",}')
|
||||||
self.assertEqual(json.loads(on), {'abc': 'def'})
|
self.assertEqual(json.loads(on), {'abc': 'def'})
|
||||||
|
|
||||||
|
on = js_to_json('{ 0: /* " \n */ ",]" , }')
|
||||||
|
self.assertEqual(json.loads(on), {'0': ',]'})
|
||||||
|
|
||||||
|
on = js_to_json(r'["<p>x<\/p>"]')
|
||||||
|
self.assertEqual(json.loads(on), ['<p>x</p>'])
|
||||||
|
|
||||||
|
on = js_to_json(r'["\xaa"]')
|
||||||
|
self.assertEqual(json.loads(on), ['\u00aa'])
|
||||||
|
|
||||||
|
on = js_to_json("['a\\\nb']")
|
||||||
|
self.assertEqual(json.loads(on), ['ab'])
|
||||||
|
|
||||||
|
on = js_to_json('{0xff:0xff}')
|
||||||
|
self.assertEqual(json.loads(on), {'255': 255})
|
||||||
|
|
||||||
|
on = js_to_json('{077:077}')
|
||||||
|
self.assertEqual(json.loads(on), {'63': 63})
|
||||||
|
|
||||||
|
on = js_to_json('{42:42}')
|
||||||
|
self.assertEqual(json.loads(on), {'42': 42})
|
||||||
|
|
||||||
|
def test_extract_attributes(self):
|
||||||
|
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
|
||||||
|
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'}) # XML
|
||||||
|
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
|
||||||
|
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
|
||||||
|
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
|
||||||
|
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
|
||||||
|
self.assertEqual(extract_attributes('<e x >'), {'x': None})
|
||||||
|
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
|
||||||
|
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
|
||||||
|
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
|
||||||
|
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
|
||||||
|
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
|
||||||
|
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
|
||||||
|
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
|
||||||
|
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
|
||||||
|
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
|
||||||
|
# "Narrow" Python builds don't support unicode code points outside BMP.
|
||||||
|
try:
|
||||||
|
compat_chr(0x10000)
|
||||||
|
supports_outside_bmp = True
|
||||||
|
except ValueError:
|
||||||
|
supports_outside_bmp = False
|
||||||
|
if supports_outside_bmp:
|
||||||
|
self.assertEqual(extract_attributes('<e x="Smile 😀!">'), {'x': 'Smile \U0001f600!'})
|
||||||
|
|
||||||
def test_clean_html(self):
|
def test_clean_html(self):
|
||||||
self.assertEqual(clean_html('a:\nb'), 'a: b')
|
self.assertEqual(clean_html('a:\nb'), 'a: b')
|
||||||
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
|
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
|
||||||
@@ -506,6 +745,17 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
|
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
|
||||||
self.assertEqual(parse_filesize('1,24 KB'), 1240)
|
self.assertEqual(parse_filesize('1,24 KB'), 1240)
|
||||||
|
|
||||||
|
def test_parse_count(self):
|
||||||
|
self.assertEqual(parse_count(None), None)
|
||||||
|
self.assertEqual(parse_count(''), None)
|
||||||
|
self.assertEqual(parse_count('0'), 0)
|
||||||
|
self.assertEqual(parse_count('1000'), 1000)
|
||||||
|
self.assertEqual(parse_count('1.000'), 1000)
|
||||||
|
self.assertEqual(parse_count('1.1k'), 1100)
|
||||||
|
self.assertEqual(parse_count('1.1kk'), 1100000)
|
||||||
|
self.assertEqual(parse_count('1.1kk '), 1100000)
|
||||||
|
self.assertEqual(parse_count('1.1kk views'), 1100000)
|
||||||
|
|
||||||
def test_version_tuple(self):
|
def test_version_tuple(self):
|
||||||
self.assertEqual(version_tuple('1'), (1,))
|
self.assertEqual(version_tuple('1'), (1,))
|
||||||
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
|
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
|
||||||
@@ -586,12 +836,13 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
|||||||
{'like_count': 190, 'dislike_count': 10}))
|
{'like_count': 190, 'dislike_count': 10}))
|
||||||
|
|
||||||
def test_parse_dfxp_time_expr(self):
|
def test_parse_dfxp_time_expr(self):
|
||||||
self.assertEqual(parse_dfxp_time_expr(None), 0.0)
|
self.assertEqual(parse_dfxp_time_expr(None), None)
|
||||||
self.assertEqual(parse_dfxp_time_expr(''), 0.0)
|
self.assertEqual(parse_dfxp_time_expr(''), None)
|
||||||
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
|
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
|
||||||
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
|
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
|
||||||
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
|
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
|
||||||
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
|
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
|
||||||
|
self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
|
||||||
|
|
||||||
def test_dfxp2srt(self):
|
def test_dfxp2srt(self):
|
||||||
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
|
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
|
||||||
@@ -601,6 +852,9 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
|||||||
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
|
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
|
||||||
<p begin="1" end="2">第二行<br/>♪♪</p>
|
<p begin="1" end="2">第二行<br/>♪♪</p>
|
||||||
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
|
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
|
||||||
|
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
|
||||||
|
<p begin="-1" end="-1">Ignore, two</p>
|
||||||
|
<p begin="3" dur="-1">Ignored, three</p>
|
||||||
</div>
|
</div>
|
||||||
</body>
|
</body>
|
||||||
</tt>'''
|
</tt>'''
|
||||||
@@ -636,6 +890,69 @@ The first line
|
|||||||
'''
|
'''
|
||||||
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
|
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
|
||||||
|
|
||||||
|
def test_cli_option(self):
|
||||||
|
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
|
||||||
|
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
|
||||||
|
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
|
||||||
|
|
||||||
|
def test_cli_valueless_option(self):
|
||||||
|
self.assertEqual(cli_valueless_option(
|
||||||
|
{'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
|
||||||
|
self.assertEqual(cli_valueless_option(
|
||||||
|
{'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
|
||||||
|
self.assertEqual(cli_valueless_option(
|
||||||
|
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
|
||||||
|
self.assertEqual(cli_valueless_option(
|
||||||
|
{'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
|
||||||
|
self.assertEqual(cli_valueless_option(
|
||||||
|
{'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
|
||||||
|
self.assertEqual(cli_valueless_option(
|
||||||
|
{'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
|
||||||
|
|
||||||
|
def test_cli_bool_option(self):
|
||||||
|
self.assertEqual(
|
||||||
|
cli_bool_option(
|
||||||
|
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
|
||||||
|
['--no-check-certificate', 'true'])
|
||||||
|
self.assertEqual(
|
||||||
|
cli_bool_option(
|
||||||
|
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
|
||||||
|
['--no-check-certificate=true'])
|
||||||
|
self.assertEqual(
|
||||||
|
cli_bool_option(
|
||||||
|
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
|
||||||
|
['--check-certificate', 'false'])
|
||||||
|
self.assertEqual(
|
||||||
|
cli_bool_option(
|
||||||
|
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
|
||||||
|
['--check-certificate=false'])
|
||||||
|
self.assertEqual(
|
||||||
|
cli_bool_option(
|
||||||
|
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
|
||||||
|
['--check-certificate', 'true'])
|
||||||
|
self.assertEqual(
|
||||||
|
cli_bool_option(
|
||||||
|
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
|
||||||
|
['--check-certificate=true'])
|
||||||
|
|
||||||
|
def test_ohdave_rsa_encrypt(self):
|
||||||
|
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
|
||||||
|
e = 65537
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
ohdave_rsa_encrypt(b'aa111222', e, N),
|
||||||
|
'726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
|
||||||
|
|
||||||
|
def test_encode_base_n(self):
|
||||||
|
self.assertEqual(encode_base_n(0, 30), '0')
|
||||||
|
self.assertEqual(encode_base_n(80, 30), '2k')
|
||||||
|
|
||||||
|
custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
|
||||||
|
self.assertEqual(encode_base_n(0, 30, custom_table), '9')
|
||||||
|
self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
|
||||||
|
|
||||||
|
self.assertRaises(ValueError, encode_base_n, 0, 70)
|
||||||
|
self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ params = get_params({
|
|||||||
|
|
||||||
|
|
||||||
TEST_ID = 'gr51aVj-mLg'
|
TEST_ID = 'gr51aVj-mLg'
|
||||||
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
|
ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
|
||||||
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
|
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
|
||||||
|
|
||||||
|
|
||||||
@@ -66,7 +66,7 @@ class TestAnnotations(unittest.TestCase):
|
|||||||
textTag = a.find('TEXT')
|
textTag = a.find('TEXT')
|
||||||
text = textTag.text
|
text = textTag.text
|
||||||
self.assertTrue(text in expected) # assertIn only added in python 2.7
|
self.assertTrue(text in expected) # assertIn only added in python 2.7
|
||||||
# remove the first occurance, there could be more than one annotation with the same text
|
# remove the first occurrence, there could be more than one annotation with the same text
|
||||||
expected.remove(text)
|
expected.remove(text)
|
||||||
# We should have seen (and removed) all the expected annotation texts.
|
# We should have seen (and removed) all the expected annotation texts.
|
||||||
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
|
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ class TestYoutubeLists(unittest.TestCase):
|
|||||||
ie = YoutubePlaylistIE(dl)
|
ie = YoutubePlaylistIE(dl)
|
||||||
# TODO find a > 100 (paginating?) videos course
|
# TODO find a > 100 (paginating?) videos course
|
||||||
result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
|
result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
|
||||||
entries = result['entries']
|
entries = list(result['entries'])
|
||||||
self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
|
self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
|
||||||
self.assertEqual(len(entries), 25)
|
self.assertEqual(len(entries), 25)
|
||||||
self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
|
self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
|
||||||
@@ -44,7 +44,7 @@ class TestYoutubeLists(unittest.TestCase):
|
|||||||
ie = YoutubePlaylistIE(dl)
|
ie = YoutubePlaylistIE(dl)
|
||||||
result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
|
result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
|
||||||
entries = result['entries']
|
entries = result['entries']
|
||||||
self.assertTrue(len(entries) >= 20)
|
self.assertTrue(len(entries) >= 50)
|
||||||
original_video = entries[0]
|
original_video = entries[0]
|
||||||
self.assertEqual(original_video['id'], 'OQpdSVF_k_w')
|
self.assertEqual(original_video['id'], 'OQpdSVF_k_w')
|
||||||
|
|
||||||
@@ -57,5 +57,14 @@ class TestYoutubeLists(unittest.TestCase):
|
|||||||
entries = result['entries']
|
entries = result['entries']
|
||||||
self.assertEqual(len(entries), 100)
|
self.assertEqual(len(entries), 100)
|
||||||
|
|
||||||
|
def test_youtube_flat_playlist_titles(self):
|
||||||
|
dl = FakeYDL()
|
||||||
|
dl.params['extract_flat'] = True
|
||||||
|
ie = YoutubePlaylistIE(dl)
|
||||||
|
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
|
||||||
|
self.assertIsPlaylist(result)
|
||||||
|
for entry in result['entries']:
|
||||||
|
self.assertTrue(entry.get('title'))
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
34
test/versions.json
Normal file
34
test/versions.json
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{
|
||||||
|
"latest": "2013.01.06",
|
||||||
|
"signature": "72158cdba391628569ffdbea259afbcf279bbe3d8aeb7492690735dc1cfa6afa754f55c61196f3871d429599ab22f2667f1fec98865527b32632e7f4b3675a7ef0f0fbe084d359256ae4bba68f0d33854e531a70754712f244be71d4b92e664302aa99653ee4df19800d955b6c4149cd2b3f24288d6e4b40b16126e01f4c8ce6",
|
||||||
|
"versions": {
|
||||||
|
"2013.01.02": {
|
||||||
|
"bin": [
|
||||||
|
"http://youtube-dl.org/downloads/2013.01.02/youtube-dl",
|
||||||
|
"f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b"
|
||||||
|
],
|
||||||
|
"exe": [
|
||||||
|
"http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe",
|
||||||
|
"75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422"
|
||||||
|
],
|
||||||
|
"tar": [
|
||||||
|
"http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz",
|
||||||
|
"6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"2013.01.06": {
|
||||||
|
"bin": [
|
||||||
|
"http://youtube-dl.org/downloads/2013.01.06/youtube-dl",
|
||||||
|
"64b6ed8865735c6302e836d4d832577321b4519aa02640dc508580c1ee824049"
|
||||||
|
],
|
||||||
|
"exe": [
|
||||||
|
"http://youtube-dl.org/downloads/2013.01.06/youtube-dl.exe",
|
||||||
|
"58609baf91e4389d36e3ba586e21dab882daaaee537e4448b1265392ae86ff84"
|
||||||
|
],
|
||||||
|
"tar": [
|
||||||
|
"http://youtube-dl.org/downloads/2013.01.06/youtube-dl-2013.01.06.tar.gz",
|
||||||
|
"fe77ab20a95d980ed17a659aa67e371fdd4d656d19c4c7950e7b720b0c2f1a86"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
5
tox.ini
5
tox.ini
@@ -1,5 +1,5 @@
|
|||||||
[tox]
|
[tox]
|
||||||
envlist = py26,py27,py33,py34
|
envlist = py26,py27,py33,py34,py35
|
||||||
[testenv]
|
[testenv]
|
||||||
deps =
|
deps =
|
||||||
nose
|
nose
|
||||||
@@ -8,6 +8,7 @@ deps =
|
|||||||
passenv = HOME
|
passenv = HOME
|
||||||
defaultargs = test --exclude test_download.py --exclude test_age_restriction.py
|
defaultargs = test --exclude test_download.py --exclude test_age_restriction.py
|
||||||
--exclude test_subtitles.py --exclude test_write_annotations.py
|
--exclude test_subtitles.py --exclude test_write_annotations.py
|
||||||
--exclude test_youtube_lists.py
|
--exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py
|
||||||
|
--exclude test_socks.py
|
||||||
commands = nosetests --verbose {posargs:{[testenv]defaultargs}} # --with-coverage --cover-package=youtube_dl --cover-html
|
commands = nosetests --verbose {posargs:{[testenv]defaultargs}} # --with-coverage --cover-package=youtube_dl --cover-html
|
||||||
# test.test_download:TestDownload.test_NowVideo
|
# test.test_download:TestDownload.test_NowVideo
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -9,7 +9,6 @@ import codecs
|
|||||||
import io
|
import io
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
import shlex
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
|
||||||
@@ -20,6 +19,7 @@ from .compat import (
|
|||||||
compat_expanduser,
|
compat_expanduser,
|
||||||
compat_getpass,
|
compat_getpass,
|
||||||
compat_print,
|
compat_print,
|
||||||
|
compat_shlex_split,
|
||||||
workaround_optparse_bug9161,
|
workaround_optparse_bug9161,
|
||||||
)
|
)
|
||||||
from .utils import (
|
from .utils import (
|
||||||
@@ -67,9 +67,9 @@ def _real_main(argv=None):
|
|||||||
# Custom HTTP headers
|
# Custom HTTP headers
|
||||||
if opts.headers is not None:
|
if opts.headers is not None:
|
||||||
for h in opts.headers:
|
for h in opts.headers:
|
||||||
if h.find(':', 1) < 0:
|
if ':' not in h:
|
||||||
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
|
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
|
||||||
key, value = h.split(':', 2)
|
key, value = h.split(':', 1)
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
|
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
|
||||||
std_headers[key] = value
|
std_headers[key] = value
|
||||||
@@ -86,7 +86,9 @@ def _real_main(argv=None):
|
|||||||
if opts.batchfile == '-':
|
if opts.batchfile == '-':
|
||||||
batchfd = sys.stdin
|
batchfd = sys.stdin
|
||||||
else:
|
else:
|
||||||
batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
|
batchfd = io.open(
|
||||||
|
compat_expanduser(opts.batchfile),
|
||||||
|
'r', encoding='utf-8', errors='ignore')
|
||||||
batch_urls = read_batch_urls(batchfd)
|
batch_urls = read_batch_urls(batchfd)
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
|
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
|
||||||
@@ -144,14 +146,20 @@ def _real_main(argv=None):
|
|||||||
if numeric_limit is None:
|
if numeric_limit is None:
|
||||||
parser.error('invalid max_filesize specified')
|
parser.error('invalid max_filesize specified')
|
||||||
opts.max_filesize = numeric_limit
|
opts.max_filesize = numeric_limit
|
||||||
if opts.retries is not None:
|
|
||||||
if opts.retries in ('inf', 'infinite'):
|
def parse_retries(retries):
|
||||||
opts_retries = float('inf')
|
if retries in ('inf', 'infinite'):
|
||||||
|
parsed_retries = float('inf')
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
opts_retries = int(opts.retries)
|
parsed_retries = int(retries)
|
||||||
except (TypeError, ValueError):
|
except (TypeError, ValueError):
|
||||||
parser.error('invalid retry count specified')
|
parser.error('invalid retry count specified')
|
||||||
|
return parsed_retries
|
||||||
|
if opts.retries is not None:
|
||||||
|
opts.retries = parse_retries(opts.retries)
|
||||||
|
if opts.fragment_retries is not None:
|
||||||
|
opts.fragment_retries = parse_retries(opts.fragment_retries)
|
||||||
if opts.buffersize is not None:
|
if opts.buffersize is not None:
|
||||||
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
|
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
|
||||||
if numeric_buffersize is None:
|
if numeric_buffersize is None:
|
||||||
@@ -169,7 +177,7 @@ def _real_main(argv=None):
|
|||||||
if not opts.audioquality.isdigit():
|
if not opts.audioquality.isdigit():
|
||||||
parser.error('invalid audio quality specified')
|
parser.error('invalid audio quality specified')
|
||||||
if opts.recodevideo is not None:
|
if opts.recodevideo is not None:
|
||||||
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
|
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
|
||||||
parser.error('invalid video recode format specified')
|
parser.error('invalid video recode format specified')
|
||||||
if opts.convertsubtitles is not None:
|
if opts.convertsubtitles is not None:
|
||||||
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
|
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
|
||||||
@@ -262,7 +270,10 @@ def _real_main(argv=None):
|
|||||||
parser.error('setting filesize xattr requested but python-xattr is not available')
|
parser.error('setting filesize xattr requested but python-xattr is not available')
|
||||||
external_downloader_args = None
|
external_downloader_args = None
|
||||||
if opts.external_downloader_args:
|
if opts.external_downloader_args:
|
||||||
external_downloader_args = shlex.split(opts.external_downloader_args)
|
external_downloader_args = compat_shlex_split(opts.external_downloader_args)
|
||||||
|
postprocessor_args = None
|
||||||
|
if opts.postprocessor_args:
|
||||||
|
postprocessor_args = compat_shlex_split(opts.postprocessor_args)
|
||||||
match_filter = (
|
match_filter = (
|
||||||
None if opts.match_filter is None
|
None if opts.match_filter is None
|
||||||
else match_filter_func(opts.match_filter))
|
else match_filter_func(opts.match_filter))
|
||||||
@@ -296,7 +307,8 @@ def _real_main(argv=None):
|
|||||||
'force_generic_extractor': opts.force_generic_extractor,
|
'force_generic_extractor': opts.force_generic_extractor,
|
||||||
'ratelimit': opts.ratelimit,
|
'ratelimit': opts.ratelimit,
|
||||||
'nooverwrites': opts.nooverwrites,
|
'nooverwrites': opts.nooverwrites,
|
||||||
'retries': opts_retries,
|
'retries': opts.retries,
|
||||||
|
'fragment_retries': opts.fragment_retries,
|
||||||
'buffersize': opts.buffersize,
|
'buffersize': opts.buffersize,
|
||||||
'noresizebuffer': opts.noresizebuffer,
|
'noresizebuffer': opts.noresizebuffer,
|
||||||
'continuedl': opts.continue_dl,
|
'continuedl': opts.continue_dl,
|
||||||
@@ -352,6 +364,7 @@ def _real_main(argv=None):
|
|||||||
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
|
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
|
||||||
'encoding': opts.encoding,
|
'encoding': opts.encoding,
|
||||||
'extract_flat': opts.extract_flat,
|
'extract_flat': opts.extract_flat,
|
||||||
|
'mark_watched': opts.mark_watched,
|
||||||
'merge_output_format': opts.merge_output_format,
|
'merge_output_format': opts.merge_output_format,
|
||||||
'postprocessors': postprocessors,
|
'postprocessors': postprocessors,
|
||||||
'fixup': opts.fixup,
|
'fixup': opts.fixup,
|
||||||
@@ -366,14 +379,16 @@ def _real_main(argv=None):
|
|||||||
'no_color': opts.no_color,
|
'no_color': opts.no_color,
|
||||||
'ffmpeg_location': opts.ffmpeg_location,
|
'ffmpeg_location': opts.ffmpeg_location,
|
||||||
'hls_prefer_native': opts.hls_prefer_native,
|
'hls_prefer_native': opts.hls_prefer_native,
|
||||||
|
'hls_use_mpegts': opts.hls_use_mpegts,
|
||||||
'external_downloader_args': external_downloader_args,
|
'external_downloader_args': external_downloader_args,
|
||||||
|
'postprocessor_args': postprocessor_args,
|
||||||
'cn_verification_proxy': opts.cn_verification_proxy,
|
'cn_verification_proxy': opts.cn_verification_proxy,
|
||||||
}
|
}
|
||||||
|
|
||||||
with YoutubeDL(ydl_opts) as ydl:
|
with YoutubeDL(ydl_opts) as ydl:
|
||||||
# Update version
|
# Update version
|
||||||
if opts.update_self:
|
if opts.update_self:
|
||||||
update_self(ydl.to_screen, opts.verbose)
|
update_self(ydl.to_screen, opts.verbose, ydl._opener)
|
||||||
|
|
||||||
# Remove cache dir
|
# Remove cache dir
|
||||||
if opts.rm_cachedir:
|
if opts.rm_cachedir:
|
||||||
@@ -391,7 +406,7 @@ def _real_main(argv=None):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
if opts.load_info_filename is not None:
|
if opts.load_info_filename is not None:
|
||||||
retcode = ydl.download_with_info_file(opts.load_info_filename)
|
retcode = ydl.download_with_info_file(compat_expanduser(opts.load_info_filename))
|
||||||
else:
|
else:
|
||||||
retcode = ydl.download(all_urls)
|
retcode = ydl.download(all_urls)
|
||||||
except MaxDownloadsReached:
|
except MaxDownloadsReached:
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
if __package__ is None and not hasattr(sys, "frozen"):
|
if __package__ is None and not hasattr(sys, 'frozen'):
|
||||||
# direct call of __main__.py
|
# direct call of __main__.py
|
||||||
import os.path
|
import os.path
|
||||||
path = os.path.realpath(os.path.abspath(__file__))
|
path = os.path.realpath(os.path.abspath(__file__))
|
||||||
sys.path.append(os.path.dirname(os.path.dirname(path)))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
|
||||||
|
|
||||||
import youtube_dl
|
import youtube_dl
|
||||||
|
|
||||||
|
|||||||
@@ -161,7 +161,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
|
|||||||
nonce = data[:NONCE_LENGTH_BYTES]
|
nonce = data[:NONCE_LENGTH_BYTES]
|
||||||
cipher = data[NONCE_LENGTH_BYTES:]
|
cipher = data[NONCE_LENGTH_BYTES:]
|
||||||
|
|
||||||
class Counter:
|
class Counter(object):
|
||||||
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
|
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
|
||||||
|
|
||||||
def next_value(self):
|
def next_value(self):
|
||||||
|
|||||||
@@ -1,14 +1,21 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import binascii
|
||||||
import collections
|
import collections
|
||||||
|
import email
|
||||||
import getpass
|
import getpass
|
||||||
|
import io
|
||||||
import optparse
|
import optparse
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
import shlex
|
||||||
import shutil
|
import shutil
|
||||||
import socket
|
import socket
|
||||||
|
import struct
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
import itertools
|
||||||
|
import xml.etree.ElementTree
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -36,11 +43,21 @@ try:
|
|||||||
except ImportError: # Python 2
|
except ImportError: # Python 2
|
||||||
import urlparse as compat_urlparse
|
import urlparse as compat_urlparse
|
||||||
|
|
||||||
|
try:
|
||||||
|
import urllib.response as compat_urllib_response
|
||||||
|
except ImportError: # Python 2
|
||||||
|
import urllib as compat_urllib_response
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import http.cookiejar as compat_cookiejar
|
import http.cookiejar as compat_cookiejar
|
||||||
except ImportError: # Python 2
|
except ImportError: # Python 2
|
||||||
import cookielib as compat_cookiejar
|
import cookielib as compat_cookiejar
|
||||||
|
|
||||||
|
try:
|
||||||
|
import http.cookies as compat_cookies
|
||||||
|
except ImportError: # Python 2
|
||||||
|
import Cookie as compat_cookies
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import html.entities as compat_html_entities
|
import html.entities as compat_html_entities
|
||||||
except ImportError: # Python 2
|
except ImportError: # Python 2
|
||||||
@@ -61,6 +78,11 @@ try:
|
|||||||
except ImportError: # Python 2
|
except ImportError: # Python 2
|
||||||
from urllib import urlretrieve as compat_urlretrieve
|
from urllib import urlretrieve as compat_urlretrieve
|
||||||
|
|
||||||
|
try:
|
||||||
|
from html.parser import HTMLParser as compat_HTMLParser
|
||||||
|
except ImportError: # Python 2
|
||||||
|
from HTMLParser import HTMLParser as compat_HTMLParser
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from subprocess import DEVNULL
|
from subprocess import DEVNULL
|
||||||
@@ -74,47 +96,139 @@ except ImportError:
|
|||||||
import BaseHTTPServer as compat_http_server
|
import BaseHTTPServer as compat_http_server
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
compat_str = unicode # Python 2
|
||||||
|
except NameError:
|
||||||
|
compat_str = str
|
||||||
|
|
||||||
|
try:
|
||||||
|
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
|
||||||
from urllib.parse import unquote as compat_urllib_parse_unquote
|
from urllib.parse import unquote as compat_urllib_parse_unquote
|
||||||
except ImportError:
|
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
|
||||||
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
|
except ImportError: # Python 2
|
||||||
if string == '':
|
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
|
||||||
|
else re.compile('([\x00-\x7f]+)'))
|
||||||
|
|
||||||
|
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
|
||||||
|
# implementations from cpython 3.4.3's stdlib. Python 2's version
|
||||||
|
# is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)
|
||||||
|
|
||||||
|
def compat_urllib_parse_unquote_to_bytes(string):
|
||||||
|
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
|
||||||
|
# Note: strings are encoded as UTF-8. This is only an issue if it contains
|
||||||
|
# unescaped non-ASCII characters, which URIs should not.
|
||||||
|
if not string:
|
||||||
|
# Is it a string-like object?
|
||||||
|
string.split
|
||||||
|
return b''
|
||||||
|
if isinstance(string, compat_str):
|
||||||
|
string = string.encode('utf-8')
|
||||||
|
bits = string.split(b'%')
|
||||||
|
if len(bits) == 1:
|
||||||
return string
|
return string
|
||||||
res = string.split('%')
|
res = [bits[0]]
|
||||||
if len(res) == 1:
|
append = res.append
|
||||||
|
for item in bits[1:]:
|
||||||
|
try:
|
||||||
|
append(compat_urllib_parse._hextochr[item[:2]])
|
||||||
|
append(item[2:])
|
||||||
|
except KeyError:
|
||||||
|
append(b'%')
|
||||||
|
append(item)
|
||||||
|
return b''.join(res)
|
||||||
|
|
||||||
|
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
|
||||||
|
"""Replace %xx escapes by their single-character equivalent. The optional
|
||||||
|
encoding and errors parameters specify how to decode percent-encoded
|
||||||
|
sequences into Unicode characters, as accepted by the bytes.decode()
|
||||||
|
method.
|
||||||
|
By default, percent-encoded sequences are decoded with UTF-8, and invalid
|
||||||
|
sequences are replaced by a placeholder character.
|
||||||
|
|
||||||
|
unquote('abc%20def') -> 'abc def'.
|
||||||
|
"""
|
||||||
|
if '%' not in string:
|
||||||
|
string.split
|
||||||
return string
|
return string
|
||||||
if encoding is None:
|
if encoding is None:
|
||||||
encoding = 'utf-8'
|
encoding = 'utf-8'
|
||||||
if errors is None:
|
if errors is None:
|
||||||
errors = 'replace'
|
errors = 'replace'
|
||||||
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
|
bits = _asciire.split(string)
|
||||||
pct_sequence = b''
|
res = [bits[0]]
|
||||||
string = res[0]
|
append = res.append
|
||||||
for item in res[1:]:
|
for i in range(1, len(bits), 2):
|
||||||
try:
|
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
|
||||||
if not item:
|
append(bits[i + 1])
|
||||||
raise ValueError
|
return ''.join(res)
|
||||||
pct_sequence += item[:2].decode('hex')
|
|
||||||
rest = item[2:]
|
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
|
||||||
if not rest:
|
"""Like unquote(), but also replace plus signs by spaces, as required for
|
||||||
# This segment was just a single percent-encoded character.
|
unquoting HTML form values.
|
||||||
# May be part of a sequence of code units, so delay decoding.
|
|
||||||
# (Stored in pct_sequence).
|
unquote_plus('%7e/abc+def') -> '~/abc def'
|
||||||
continue
|
"""
|
||||||
except ValueError:
|
string = string.replace('+', ' ')
|
||||||
rest = '%' + item
|
return compat_urllib_parse_unquote(string, encoding, errors)
|
||||||
# Encountered non-percent-encoded characters. Flush the current
|
|
||||||
# pct_sequence.
|
|
||||||
string += pct_sequence.decode(encoding, errors) + rest
|
|
||||||
pct_sequence = b''
|
|
||||||
if pct_sequence:
|
|
||||||
# Flush the final pct_sequence
|
|
||||||
string += pct_sequence.decode(encoding, errors)
|
|
||||||
return string
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
compat_str = unicode # Python 2
|
from urllib.parse import urlencode as compat_urllib_parse_urlencode
|
||||||
except NameError:
|
except ImportError: # Python 2
|
||||||
compat_str = str
|
# Python 2 will choke in urlencode on mixture of byte and unicode strings.
|
||||||
|
# Possible solutions are to either port it from python 3 with all
|
||||||
|
# the friends or manually ensure input query contains only byte strings.
|
||||||
|
# We will stick with latter thus recursively encoding the whole query.
|
||||||
|
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
|
||||||
|
def encode_elem(e):
|
||||||
|
if isinstance(e, dict):
|
||||||
|
e = encode_dict(e)
|
||||||
|
elif isinstance(e, (list, tuple,)):
|
||||||
|
list_e = encode_list(e)
|
||||||
|
e = tuple(list_e) if isinstance(e, tuple) else list_e
|
||||||
|
elif isinstance(e, compat_str):
|
||||||
|
e = e.encode(encoding)
|
||||||
|
return e
|
||||||
|
|
||||||
|
def encode_dict(d):
|
||||||
|
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
|
||||||
|
|
||||||
|
def encode_list(l):
|
||||||
|
return [encode_elem(e) for e in l]
|
||||||
|
|
||||||
|
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
|
||||||
|
|
||||||
|
try:
|
||||||
|
from urllib.request import DataHandler as compat_urllib_request_DataHandler
|
||||||
|
except ImportError: # Python < 3.4
|
||||||
|
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
|
||||||
|
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
|
||||||
|
def data_open(self, req):
|
||||||
|
# data URLs as specified in RFC 2397.
|
||||||
|
#
|
||||||
|
# ignores POSTed data
|
||||||
|
#
|
||||||
|
# syntax:
|
||||||
|
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
|
||||||
|
# mediatype := [ type "/" subtype ] *( ";" parameter )
|
||||||
|
# data := *urlchar
|
||||||
|
# parameter := attribute "=" value
|
||||||
|
url = req.get_full_url()
|
||||||
|
|
||||||
|
scheme, data = url.split(':', 1)
|
||||||
|
mediatype, data = data.split(',', 1)
|
||||||
|
|
||||||
|
# even base64 encoded data URLs might be quoted so unquote in any case:
|
||||||
|
data = compat_urllib_parse_unquote_to_bytes(data)
|
||||||
|
if mediatype.endswith(';base64'):
|
||||||
|
data = binascii.a2b_base64(data)
|
||||||
|
mediatype = mediatype[:-7]
|
||||||
|
|
||||||
|
if not mediatype:
|
||||||
|
mediatype = 'text/plain;charset=US-ASCII'
|
||||||
|
|
||||||
|
headers = email.message_from_string(
|
||||||
|
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
|
||||||
|
|
||||||
|
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
compat_basestring = basestring # Python 2
|
compat_basestring = basestring # Python 2
|
||||||
@@ -132,6 +246,60 @@ except ImportError: # Python 2.6
|
|||||||
from xml.parsers.expat import ExpatError as compat_xml_parse_error
|
from xml.parsers.expat import ExpatError as compat_xml_parse_error
|
||||||
|
|
||||||
|
|
||||||
|
etree = xml.etree.ElementTree
|
||||||
|
|
||||||
|
|
||||||
|
class _TreeBuilder(etree.TreeBuilder):
|
||||||
|
def doctype(self, name, pubid, system):
|
||||||
|
pass
|
||||||
|
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
def compat_etree_fromstring(text):
|
||||||
|
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
|
||||||
|
else:
|
||||||
|
# python 2.x tries to encode unicode strings with ascii (see the
|
||||||
|
# XMLParser._fixtext method)
|
||||||
|
try:
|
||||||
|
_etree_iter = etree.Element.iter
|
||||||
|
except AttributeError: # Python <=2.6
|
||||||
|
def _etree_iter(root):
|
||||||
|
for el in root.findall('*'):
|
||||||
|
yield el
|
||||||
|
for sub in _etree_iter(el):
|
||||||
|
yield sub
|
||||||
|
|
||||||
|
# on 2.6 XML doesn't have a parser argument, function copied from CPython
|
||||||
|
# 2.7 source
|
||||||
|
def _XML(text, parser=None):
|
||||||
|
if not parser:
|
||||||
|
parser = etree.XMLParser(target=_TreeBuilder())
|
||||||
|
parser.feed(text)
|
||||||
|
return parser.close()
|
||||||
|
|
||||||
|
def _element_factory(*args, **kwargs):
|
||||||
|
el = etree.Element(*args, **kwargs)
|
||||||
|
for k, v in el.items():
|
||||||
|
if isinstance(v, bytes):
|
||||||
|
el.set(k, v.decode('utf-8'))
|
||||||
|
return el
|
||||||
|
|
||||||
|
def compat_etree_fromstring(text):
|
||||||
|
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
|
||||||
|
for el in _etree_iter(doc):
|
||||||
|
if el.text is not None and isinstance(el.text, bytes):
|
||||||
|
el.text = el.text.decode('utf-8')
|
||||||
|
return doc
|
||||||
|
|
||||||
|
if sys.version_info < (2, 7):
|
||||||
|
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
|
||||||
|
# .//node does not match if a node is a direct child of . !
|
||||||
|
def compat_xpath(xpath):
|
||||||
|
if isinstance(xpath, compat_str):
|
||||||
|
xpath = xpath.encode('ascii')
|
||||||
|
return xpath
|
||||||
|
else:
|
||||||
|
compat_xpath = lambda xpath: xpath
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from urllib.parse import parse_qs as compat_parse_qs
|
from urllib.parse import parse_qs as compat_parse_qs
|
||||||
except ImportError: # Python 2
|
except ImportError: # Python 2
|
||||||
@@ -149,7 +317,7 @@ except ImportError: # Python 2
|
|||||||
nv = name_value.split('=', 1)
|
nv = name_value.split('=', 1)
|
||||||
if len(nv) != 2:
|
if len(nv) != 2:
|
||||||
if strict_parsing:
|
if strict_parsing:
|
||||||
raise ValueError("bad query field: %r" % (name_value,))
|
raise ValueError('bad query field: %r' % (name_value,))
|
||||||
# Handle case of a control-name with no equal sign
|
# Handle case of a control-name with no equal sign
|
||||||
if keep_blank_values:
|
if keep_blank_values:
|
||||||
nv.append('')
|
nv.append('')
|
||||||
@@ -180,15 +348,26 @@ except ImportError: # Python 2
|
|||||||
return parsed_result
|
return parsed_result
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from shlex import quote as shlex_quote
|
from shlex import quote as compat_shlex_quote
|
||||||
except ImportError: # Python < 3.3
|
except ImportError: # Python < 3.3
|
||||||
def shlex_quote(s):
|
def compat_shlex_quote(s):
|
||||||
if re.match(r'^[-_\w./]+$', s):
|
if re.match(r'^[-_\w./]+$', s):
|
||||||
return s
|
return s
|
||||||
else:
|
else:
|
||||||
return "'" + s.replace("'", "'\"'\"'") + "'"
|
return "'" + s.replace("'", "'\"'\"'") + "'"
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info >= (2, 7, 3):
|
||||||
|
compat_shlex_split = shlex.split
|
||||||
|
else:
|
||||||
|
# Working around shlex issue with unicode strings on some python 2
|
||||||
|
# versions (see http://bugs.python.org/issue1548891)
|
||||||
|
def compat_shlex_split(s, comments=False, posix=True):
|
||||||
|
if isinstance(s, compat_str):
|
||||||
|
s = s.encode('utf-8')
|
||||||
|
return shlex.split(s, comments, posix)
|
||||||
|
|
||||||
|
|
||||||
def compat_ord(c):
|
def compat_ord(c):
|
||||||
if type(c) is int:
|
if type(c) is int:
|
||||||
return c
|
return c
|
||||||
@@ -196,9 +375,15 @@ def compat_ord(c):
|
|||||||
return ord(c)
|
return ord(c)
|
||||||
|
|
||||||
|
|
||||||
|
compat_os_name = os._name if os.name == 'java' else os.name
|
||||||
|
|
||||||
|
|
||||||
if sys.version_info >= (3, 0):
|
if sys.version_info >= (3, 0):
|
||||||
compat_getenv = os.getenv
|
compat_getenv = os.getenv
|
||||||
compat_expanduser = os.path.expanduser
|
compat_expanduser = os.path.expanduser
|
||||||
|
|
||||||
|
def compat_setenv(key, value, env=os.environ):
|
||||||
|
env[key] = value
|
||||||
else:
|
else:
|
||||||
# Environment variables should be decoded with filesystem encoding.
|
# Environment variables should be decoded with filesystem encoding.
|
||||||
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
|
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
|
||||||
@@ -210,13 +395,19 @@ else:
|
|||||||
env = env.decode(get_filesystem_encoding())
|
env = env.decode(get_filesystem_encoding())
|
||||||
return env
|
return env
|
||||||
|
|
||||||
|
def compat_setenv(key, value, env=os.environ):
|
||||||
|
def encode(v):
|
||||||
|
from .utils import get_filesystem_encoding
|
||||||
|
return v.encode(get_filesystem_encoding()) if isinstance(v, compat_str) else v
|
||||||
|
env[encode(key)] = encode(value)
|
||||||
|
|
||||||
# HACK: The default implementations of os.path.expanduser from cpython do not decode
|
# HACK: The default implementations of os.path.expanduser from cpython do not decode
|
||||||
# environment variables with filesystem encoding. We will work around this by
|
# environment variables with filesystem encoding. We will work around this by
|
||||||
# providing adjusted implementations.
|
# providing adjusted implementations.
|
||||||
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
|
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
|
||||||
# for different platforms with correct environment variables decoding.
|
# for different platforms with correct environment variables decoding.
|
||||||
|
|
||||||
if os.name == 'posix':
|
if compat_os_name == 'posix':
|
||||||
def compat_expanduser(path):
|
def compat_expanduser(path):
|
||||||
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
|
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
|
||||||
do nothing."""
|
do nothing."""
|
||||||
@@ -240,7 +431,7 @@ else:
|
|||||||
userhome = pwent.pw_dir
|
userhome = pwent.pw_dir
|
||||||
userhome = userhome.rstrip('/')
|
userhome = userhome.rstrip('/')
|
||||||
return (userhome + path[i:]) or '/'
|
return (userhome + path[i:]) or '/'
|
||||||
elif os.name == 'nt' or os.name == 'ce':
|
elif compat_os_name == 'nt' or compat_os_name == 'ce':
|
||||||
def compat_expanduser(path):
|
def compat_expanduser(path):
|
||||||
"""Expand ~ and ~user constructs.
|
"""Expand ~ and ~user constructs.
|
||||||
|
|
||||||
@@ -282,18 +473,6 @@ else:
|
|||||||
print(s)
|
print(s)
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
subprocess_check_output = subprocess.check_output
|
|
||||||
except AttributeError:
|
|
||||||
def subprocess_check_output(*args, **kwargs):
|
|
||||||
assert 'input' not in kwargs
|
|
||||||
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
|
|
||||||
output, _ = p.communicate()
|
|
||||||
ret = p.poll()
|
|
||||||
if ret:
|
|
||||||
raise subprocess.CalledProcessError(ret, p.args, output=output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
if sys.version_info < (3, 0) and sys.platform == 'win32':
|
if sys.version_info < (3, 0) and sys.platform == 'win32':
|
||||||
def compat_getpass(prompt, *args, **kwargs):
|
def compat_getpass(prompt, *args, **kwargs):
|
||||||
if isinstance(prompt, compat_str):
|
if isinstance(prompt, compat_str):
|
||||||
@@ -303,7 +482,7 @@ if sys.version_info < (3, 0) and sys.platform == 'win32':
|
|||||||
else:
|
else:
|
||||||
compat_getpass = getpass.getpass
|
compat_getpass = getpass.getpass
|
||||||
|
|
||||||
# Old 2.6 and 2.7 releases require kwargs to be bytes
|
# Python < 2.6.5 require kwargs to be bytes
|
||||||
try:
|
try:
|
||||||
def _testfunc(x):
|
def _testfunc(x):
|
||||||
pass
|
pass
|
||||||
@@ -336,7 +515,7 @@ if sys.version_info < (2, 7):
|
|||||||
if err is not None:
|
if err is not None:
|
||||||
raise err
|
raise err
|
||||||
else:
|
else:
|
||||||
raise socket.error("getaddrinfo returns an empty list")
|
raise socket.error('getaddrinfo returns an empty list')
|
||||||
else:
|
else:
|
||||||
compat_socket_create_connection = socket.create_connection
|
compat_socket_create_connection = socket.create_connection
|
||||||
|
|
||||||
@@ -366,34 +545,77 @@ if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
|
|||||||
else:
|
else:
|
||||||
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
|
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
|
||||||
|
|
||||||
def compat_get_terminal_size():
|
def compat_get_terminal_size(fallback=(80, 24)):
|
||||||
columns = compat_getenv('COLUMNS', None)
|
columns = compat_getenv('COLUMNS')
|
||||||
if columns:
|
if columns:
|
||||||
columns = int(columns)
|
columns = int(columns)
|
||||||
else:
|
else:
|
||||||
columns = None
|
columns = None
|
||||||
lines = compat_getenv('LINES', None)
|
lines = compat_getenv('LINES')
|
||||||
if lines:
|
if lines:
|
||||||
lines = int(lines)
|
lines = int(lines)
|
||||||
else:
|
else:
|
||||||
lines = None
|
lines = None
|
||||||
|
|
||||||
try:
|
if columns is None or lines is None or columns <= 0 or lines <= 0:
|
||||||
sp = subprocess.Popen(
|
try:
|
||||||
['stty', 'size'],
|
sp = subprocess.Popen(
|
||||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
['stty', 'size'],
|
||||||
out, err = sp.communicate()
|
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
lines, columns = map(int, out.split())
|
out, err = sp.communicate()
|
||||||
except Exception:
|
_lines, _columns = map(int, out.split())
|
||||||
pass
|
except Exception:
|
||||||
|
_columns, _lines = _terminal_size(*fallback)
|
||||||
|
|
||||||
|
if columns is None or columns <= 0:
|
||||||
|
columns = _columns
|
||||||
|
if lines is None or lines <= 0:
|
||||||
|
lines = _lines
|
||||||
return _terminal_size(columns, lines)
|
return _terminal_size(columns, lines)
|
||||||
|
|
||||||
|
try:
|
||||||
|
itertools.count(start=0, step=1)
|
||||||
|
compat_itertools_count = itertools.count
|
||||||
|
except TypeError: # Python 2.6
|
||||||
|
def compat_itertools_count(start=0, step=1):
|
||||||
|
n = start
|
||||||
|
while True:
|
||||||
|
yield n
|
||||||
|
n += step
|
||||||
|
|
||||||
|
if sys.version_info >= (3, 0):
|
||||||
|
from tokenize import tokenize as compat_tokenize_tokenize
|
||||||
|
else:
|
||||||
|
from tokenize import generate_tokens as compat_tokenize_tokenize
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
struct.pack('!I', 0)
|
||||||
|
except TypeError:
|
||||||
|
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
|
||||||
|
# See https://bugs.python.org/issue19099
|
||||||
|
def compat_struct_pack(spec, *args):
|
||||||
|
if isinstance(spec, compat_str):
|
||||||
|
spec = spec.encode('ascii')
|
||||||
|
return struct.pack(spec, *args)
|
||||||
|
|
||||||
|
def compat_struct_unpack(spec, *args):
|
||||||
|
if isinstance(spec, compat_str):
|
||||||
|
spec = spec.encode('ascii')
|
||||||
|
return struct.unpack(spec, *args)
|
||||||
|
else:
|
||||||
|
compat_struct_pack = struct.pack
|
||||||
|
compat_struct_unpack = struct.unpack
|
||||||
|
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
|
'compat_HTMLParser',
|
||||||
'compat_HTTPError',
|
'compat_HTTPError',
|
||||||
'compat_basestring',
|
'compat_basestring',
|
||||||
'compat_chr',
|
'compat_chr',
|
||||||
'compat_cookiejar',
|
'compat_cookiejar',
|
||||||
|
'compat_cookies',
|
||||||
|
'compat_etree_fromstring',
|
||||||
'compat_expanduser',
|
'compat_expanduser',
|
||||||
'compat_get_terminal_size',
|
'compat_get_terminal_size',
|
||||||
'compat_getenv',
|
'compat_getenv',
|
||||||
@@ -401,22 +623,34 @@ __all__ = [
|
|||||||
'compat_html_entities',
|
'compat_html_entities',
|
||||||
'compat_http_client',
|
'compat_http_client',
|
||||||
'compat_http_server',
|
'compat_http_server',
|
||||||
|
'compat_itertools_count',
|
||||||
'compat_kwargs',
|
'compat_kwargs',
|
||||||
'compat_ord',
|
'compat_ord',
|
||||||
|
'compat_os_name',
|
||||||
'compat_parse_qs',
|
'compat_parse_qs',
|
||||||
'compat_print',
|
'compat_print',
|
||||||
|
'compat_setenv',
|
||||||
|
'compat_shlex_quote',
|
||||||
|
'compat_shlex_split',
|
||||||
'compat_socket_create_connection',
|
'compat_socket_create_connection',
|
||||||
'compat_str',
|
'compat_str',
|
||||||
|
'compat_struct_pack',
|
||||||
|
'compat_struct_unpack',
|
||||||
'compat_subprocess_get_DEVNULL',
|
'compat_subprocess_get_DEVNULL',
|
||||||
|
'compat_tokenize_tokenize',
|
||||||
'compat_urllib_error',
|
'compat_urllib_error',
|
||||||
'compat_urllib_parse',
|
'compat_urllib_parse',
|
||||||
'compat_urllib_parse_unquote',
|
'compat_urllib_parse_unquote',
|
||||||
|
'compat_urllib_parse_unquote_plus',
|
||||||
|
'compat_urllib_parse_unquote_to_bytes',
|
||||||
|
'compat_urllib_parse_urlencode',
|
||||||
'compat_urllib_parse_urlparse',
|
'compat_urllib_parse_urlparse',
|
||||||
'compat_urllib_request',
|
'compat_urllib_request',
|
||||||
|
'compat_urllib_request_DataHandler',
|
||||||
|
'compat_urllib_response',
|
||||||
'compat_urlparse',
|
'compat_urlparse',
|
||||||
'compat_urlretrieve',
|
'compat_urlretrieve',
|
||||||
'compat_xml_parse_error',
|
'compat_xml_parse_error',
|
||||||
'shlex_quote',
|
'compat_xpath',
|
||||||
'subprocess_check_output',
|
|
||||||
'workaround_optparse_bug9161',
|
'workaround_optparse_bug9161',
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -1,13 +1,16 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from .external import get_external_downloader
|
|
||||||
from .f4m import F4mFD
|
from .f4m import F4mFD
|
||||||
from .hls import HlsFD
|
from .hls import HlsFD
|
||||||
from .hls import NativeHlsFD
|
|
||||||
from .http import HttpFD
|
from .http import HttpFD
|
||||||
from .rtsp import RtspFD
|
|
||||||
from .rtmp import RtmpFD
|
from .rtmp import RtmpFD
|
||||||
|
from .dash import DashSegmentsFD
|
||||||
|
from .rtsp import RtspFD
|
||||||
|
from .external import (
|
||||||
|
get_external_downloader,
|
||||||
|
FFmpegFD,
|
||||||
|
)
|
||||||
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
determine_protocol,
|
determine_protocol,
|
||||||
@@ -15,11 +18,12 @@ from ..utils import (
|
|||||||
|
|
||||||
PROTOCOL_MAP = {
|
PROTOCOL_MAP = {
|
||||||
'rtmp': RtmpFD,
|
'rtmp': RtmpFD,
|
||||||
'm3u8_native': NativeHlsFD,
|
'm3u8_native': HlsFD,
|
||||||
'm3u8': HlsFD,
|
'm3u8': FFmpegFD,
|
||||||
'mms': RtspFD,
|
'mms': RtspFD,
|
||||||
'rtsp': RtspFD,
|
'rtsp': RtspFD,
|
||||||
'f4m': F4mFD,
|
'f4m': F4mFD,
|
||||||
|
'http_dash_segments': DashSegmentsFD,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -28,14 +32,20 @@ def get_suitable_downloader(info_dict, params={}):
|
|||||||
protocol = determine_protocol(info_dict)
|
protocol = determine_protocol(info_dict)
|
||||||
info_dict['protocol'] = protocol
|
info_dict['protocol'] = protocol
|
||||||
|
|
||||||
|
# if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
|
||||||
|
# return FFmpegFD
|
||||||
|
|
||||||
external_downloader = params.get('external_downloader')
|
external_downloader = params.get('external_downloader')
|
||||||
if external_downloader is not None:
|
if external_downloader is not None:
|
||||||
ed = get_external_downloader(external_downloader)
|
ed = get_external_downloader(external_downloader)
|
||||||
if ed.supports(info_dict):
|
if ed.can_download(info_dict):
|
||||||
return ed
|
return ed
|
||||||
|
|
||||||
if protocol == 'm3u8' and params.get('hls_prefer_native'):
|
if protocol == 'm3u8' and params.get('hls_prefer_native') is True:
|
||||||
return NativeHlsFD
|
return HlsFD
|
||||||
|
|
||||||
|
if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:
|
||||||
|
return FFmpegFD
|
||||||
|
|
||||||
return PROTOCOL_MAP.get(protocol, HttpFD)
|
return PROTOCOL_MAP.get(protocol, HttpFD)
|
||||||
|
|
||||||
|
|||||||
@@ -5,9 +5,10 @@ import re
|
|||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from ..compat import compat_str
|
from ..compat import compat_os_name
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
|
error_to_compat_str,
|
||||||
decodeArgument,
|
decodeArgument,
|
||||||
format_bytes,
|
format_bytes,
|
||||||
timeconvert,
|
timeconvert,
|
||||||
@@ -42,9 +43,10 @@ class FileDownloader(object):
|
|||||||
min_filesize: Skip files smaller than this size
|
min_filesize: Skip files smaller than this size
|
||||||
max_filesize: Skip files larger than this size
|
max_filesize: Skip files larger than this size
|
||||||
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
|
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
|
||||||
(experimenatal)
|
(experimental)
|
||||||
external_downloader_args: A list of additional command-line arguments for the
|
external_downloader_args: A list of additional command-line arguments for the
|
||||||
external downloader.
|
external downloader.
|
||||||
|
hls_use_mpegts: Use the mpegts container for HLS videos.
|
||||||
|
|
||||||
Subclasses of this one must re-define the real_download method.
|
Subclasses of this one must re-define the real_download method.
|
||||||
"""
|
"""
|
||||||
@@ -113,6 +115,10 @@ class FileDownloader(object):
|
|||||||
return '%10s' % '---b/s'
|
return '%10s' % '---b/s'
|
||||||
return '%10s' % ('%s/s' % format_bytes(speed))
|
return '%10s' % ('%s/s' % format_bytes(speed))
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def format_retries(retries):
|
||||||
|
return 'inf' if retries == float('inf') else '%.0f' % retries
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def best_block_size(elapsed_time, bytes):
|
def best_block_size(elapsed_time, bytes):
|
||||||
new_min = max(bytes / 2.0, 1.0)
|
new_min = max(bytes / 2.0, 1.0)
|
||||||
@@ -156,7 +162,7 @@ class FileDownloader(object):
|
|||||||
|
|
||||||
def slow_down(self, start_time, now, byte_counter):
|
def slow_down(self, start_time, now, byte_counter):
|
||||||
"""Sleep if the download speed is over the rate limit."""
|
"""Sleep if the download speed is over the rate limit."""
|
||||||
rate_limit = self.params.get('ratelimit', None)
|
rate_limit = self.params.get('ratelimit')
|
||||||
if rate_limit is None or byte_counter == 0:
|
if rate_limit is None or byte_counter == 0:
|
||||||
return
|
return
|
||||||
if now is None:
|
if now is None:
|
||||||
@@ -186,7 +192,7 @@ class FileDownloader(object):
|
|||||||
return
|
return
|
||||||
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
|
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
|
||||||
except (IOError, OSError) as err:
|
except (IOError, OSError) as err:
|
||||||
self.report_error('unable to rename file: %s' % compat_str(err))
|
self.report_error('unable to rename file: %s' % error_to_compat_str(err))
|
||||||
|
|
||||||
def try_utime(self, filename, last_modified_hdr):
|
def try_utime(self, filename, last_modified_hdr):
|
||||||
"""Try to set the last-modified time of the given file."""
|
"""Try to set the last-modified time of the given file."""
|
||||||
@@ -218,7 +224,7 @@ class FileDownloader(object):
|
|||||||
if self.params.get('progress_with_newline', False):
|
if self.params.get('progress_with_newline', False):
|
||||||
self.to_screen(fullmsg)
|
self.to_screen(fullmsg)
|
||||||
else:
|
else:
|
||||||
if os.name == 'nt':
|
if compat_os_name == 'nt':
|
||||||
prev_len = getattr(self, '_report_progress_prev_line_length',
|
prev_len = getattr(self, '_report_progress_prev_line_length',
|
||||||
0)
|
0)
|
||||||
if prev_len > len(fullmsg):
|
if prev_len > len(fullmsg):
|
||||||
@@ -295,7 +301,9 @@ class FileDownloader(object):
|
|||||||
|
|
||||||
def report_retry(self, count, retries):
|
def report_retry(self, count, retries):
|
||||||
"""Report retry in case of HTTP error 5xx"""
|
"""Report retry in case of HTTP error 5xx"""
|
||||||
self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
|
self.to_screen(
|
||||||
|
'[download] Got server HTTP error. Retrying (attempt %d of %s)...'
|
||||||
|
% (count, self.format_retries(retries)))
|
||||||
|
|
||||||
def report_file_already_downloaded(self, file_name):
|
def report_file_already_downloaded(self, file_name):
|
||||||
"""Report file has already been fully downloaded."""
|
"""Report file has already been fully downloaded."""
|
||||||
@@ -325,7 +333,7 @@ class FileDownloader(object):
|
|||||||
)
|
)
|
||||||
|
|
||||||
# Check file already present
|
# Check file already present
|
||||||
if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
|
if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
|
||||||
self.report_file_already_downloaded(filename)
|
self.report_file_already_downloaded(filename)
|
||||||
self._hook_progress({
|
self._hook_progress({
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
|
|||||||
81
youtube_dl/downloader/dash.py
Normal file
81
youtube_dl/downloader/dash.py
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .fragment import FragmentFD
|
||||||
|
from ..compat import compat_urllib_error
|
||||||
|
from ..utils import (
|
||||||
|
sanitize_open,
|
||||||
|
encodeFilename,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class DashSegmentsFD(FragmentFD):
|
||||||
|
"""
|
||||||
|
Download segments in a DASH manifest
|
||||||
|
"""
|
||||||
|
|
||||||
|
FD_NAME = 'dashsegments'
|
||||||
|
|
||||||
|
def real_download(self, filename, info_dict):
|
||||||
|
base_url = info_dict['url']
|
||||||
|
segment_urls = [info_dict['segment_urls'][0]] if self.params.get('test', False) else info_dict['segment_urls']
|
||||||
|
initialization_url = info_dict.get('initialization_url')
|
||||||
|
|
||||||
|
ctx = {
|
||||||
|
'filename': filename,
|
||||||
|
'total_frags': len(segment_urls) + (1 if initialization_url else 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
self._prepare_and_start_frag_download(ctx)
|
||||||
|
|
||||||
|
def combine_url(base_url, target_url):
|
||||||
|
if re.match(r'^https?://', target_url):
|
||||||
|
return target_url
|
||||||
|
return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
|
||||||
|
|
||||||
|
segments_filenames = []
|
||||||
|
|
||||||
|
fragment_retries = self.params.get('fragment_retries', 0)
|
||||||
|
|
||||||
|
def append_url_to_file(target_url, tmp_filename, segment_name):
|
||||||
|
target_filename = '%s-%s' % (tmp_filename, segment_name)
|
||||||
|
count = 0
|
||||||
|
while count <= fragment_retries:
|
||||||
|
try:
|
||||||
|
success = ctx['dl'].download(target_filename, {'url': combine_url(base_url, target_url)})
|
||||||
|
if not success:
|
||||||
|
return False
|
||||||
|
down, target_sanitized = sanitize_open(target_filename, 'rb')
|
||||||
|
ctx['dest_stream'].write(down.read())
|
||||||
|
down.close()
|
||||||
|
segments_filenames.append(target_sanitized)
|
||||||
|
break
|
||||||
|
except (compat_urllib_error.HTTPError, ) as err:
|
||||||
|
# YouTube may often return 404 HTTP error for a fragment causing the
|
||||||
|
# whole download to fail. However if the same fragment is immediately
|
||||||
|
# retried with the same request data this usually succeeds (1-2 attemps
|
||||||
|
# is usually enough) thus allowing to download the whole file successfully.
|
||||||
|
# So, we will retry all fragments that fail with 404 HTTP error for now.
|
||||||
|
if err.code != 404:
|
||||||
|
raise
|
||||||
|
# Retry fragment
|
||||||
|
count += 1
|
||||||
|
if count <= fragment_retries:
|
||||||
|
self.report_retry_fragment(segment_name, count, fragment_retries)
|
||||||
|
if count > fragment_retries:
|
||||||
|
self.report_error('giving up after %s fragment retries' % fragment_retries)
|
||||||
|
return False
|
||||||
|
|
||||||
|
if initialization_url:
|
||||||
|
append_url_to_file(initialization_url, ctx['tmpfilename'], 'Init')
|
||||||
|
for i, segment_url in enumerate(segment_urls):
|
||||||
|
append_url_to_file(segment_url, ctx['tmpfilename'], 'Seg%d' % i)
|
||||||
|
|
||||||
|
self._finish_frag_download(ctx)
|
||||||
|
|
||||||
|
for segment_file in segments_filenames:
|
||||||
|
os.remove(encodeFilename(segment_file))
|
||||||
|
|
||||||
|
return True
|
||||||
@@ -2,11 +2,21 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
import os.path
|
import os.path
|
||||||
import subprocess
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
|
from ..compat import compat_setenv
|
||||||
|
from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
cli_option,
|
||||||
|
cli_valueless_option,
|
||||||
|
cli_bool_option,
|
||||||
|
cli_configuration_args,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
encodeArgument,
|
encodeArgument,
|
||||||
|
handle_youtubedl_headers,
|
||||||
|
check_executable,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -41,22 +51,29 @@ class ExternalFD(FileDownloader):
|
|||||||
def exe(self):
|
def exe(self):
|
||||||
return self.params.get('external_downloader')
|
return self.params.get('external_downloader')
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def available(cls):
|
||||||
|
return check_executable(cls.get_basename(), [cls.AVAILABLE_OPT])
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def supports(cls, info_dict):
|
def supports(cls, info_dict):
|
||||||
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
|
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
|
||||||
|
|
||||||
def _source_address(self, command_option):
|
@classmethod
|
||||||
source_address = self.params.get('source_address')
|
def can_download(cls, info_dict):
|
||||||
if source_address is None:
|
return cls.available() and cls.supports(info_dict)
|
||||||
return []
|
|
||||||
return [command_option, source_address]
|
def _option(self, command_option, param):
|
||||||
|
return cli_option(self.params, command_option, param)
|
||||||
|
|
||||||
|
def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
|
||||||
|
return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)
|
||||||
|
|
||||||
|
def _valueless_option(self, command_option, param, expected_value=True):
|
||||||
|
return cli_valueless_option(self.params, command_option, param, expected_value)
|
||||||
|
|
||||||
def _configuration_args(self, default=[]):
|
def _configuration_args(self, default=[]):
|
||||||
ex_args = self.params.get('external_downloader_args')
|
return cli_configuration_args(self.params, 'external_downloader_args', default)
|
||||||
if ex_args is None:
|
|
||||||
return default
|
|
||||||
assert isinstance(ex_args, list)
|
|
||||||
return ex_args
|
|
||||||
|
|
||||||
def _call_downloader(self, tmpfilename, info_dict):
|
def _call_downloader(self, tmpfilename, info_dict):
|
||||||
""" Either overwrite this or implement _make_cmd """
|
""" Either overwrite this or implement _make_cmd """
|
||||||
@@ -73,28 +90,50 @@ class ExternalFD(FileDownloader):
|
|||||||
|
|
||||||
|
|
||||||
class CurlFD(ExternalFD):
|
class CurlFD(ExternalFD):
|
||||||
|
AVAILABLE_OPT = '-V'
|
||||||
|
|
||||||
def _make_cmd(self, tmpfilename, info_dict):
|
def _make_cmd(self, tmpfilename, info_dict):
|
||||||
cmd = [self.exe, '--location', '-o', tmpfilename]
|
cmd = [self.exe, '--location', '-o', tmpfilename]
|
||||||
for key, val in info_dict['http_headers'].items():
|
for key, val in info_dict['http_headers'].items():
|
||||||
cmd += ['--header', '%s: %s' % (key, val)]
|
cmd += ['--header', '%s: %s' % (key, val)]
|
||||||
cmd += self._source_address('--interface')
|
cmd += self._option('--interface', 'source_address')
|
||||||
|
cmd += self._option('--proxy', 'proxy')
|
||||||
|
cmd += self._valueless_option('--insecure', 'nocheckcertificate')
|
||||||
|
cmd += self._configuration_args()
|
||||||
|
cmd += ['--', info_dict['url']]
|
||||||
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
|
class AxelFD(ExternalFD):
|
||||||
|
AVAILABLE_OPT = '-V'
|
||||||
|
|
||||||
|
def _make_cmd(self, tmpfilename, info_dict):
|
||||||
|
cmd = [self.exe, '-o', tmpfilename]
|
||||||
|
for key, val in info_dict['http_headers'].items():
|
||||||
|
cmd += ['-H', '%s: %s' % (key, val)]
|
||||||
cmd += self._configuration_args()
|
cmd += self._configuration_args()
|
||||||
cmd += ['--', info_dict['url']]
|
cmd += ['--', info_dict['url']]
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
class WgetFD(ExternalFD):
|
class WgetFD(ExternalFD):
|
||||||
|
AVAILABLE_OPT = '--version'
|
||||||
|
|
||||||
def _make_cmd(self, tmpfilename, info_dict):
|
def _make_cmd(self, tmpfilename, info_dict):
|
||||||
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
|
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
|
||||||
for key, val in info_dict['http_headers'].items():
|
for key, val in info_dict['http_headers'].items():
|
||||||
cmd += ['--header', '%s: %s' % (key, val)]
|
cmd += ['--header', '%s: %s' % (key, val)]
|
||||||
cmd += self._source_address('--bind-address')
|
cmd += self._option('--bind-address', 'source_address')
|
||||||
|
cmd += self._option('--proxy', 'proxy')
|
||||||
|
cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
|
||||||
cmd += self._configuration_args()
|
cmd += self._configuration_args()
|
||||||
cmd += ['--', info_dict['url']]
|
cmd += ['--', info_dict['url']]
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
class Aria2cFD(ExternalFD):
|
class Aria2cFD(ExternalFD):
|
||||||
|
AVAILABLE_OPT = '-v'
|
||||||
|
|
||||||
def _make_cmd(self, tmpfilename, info_dict):
|
def _make_cmd(self, tmpfilename, info_dict):
|
||||||
cmd = [self.exe, '-c']
|
cmd = [self.exe, '-c']
|
||||||
cmd += self._configuration_args([
|
cmd += self._configuration_args([
|
||||||
@@ -105,10 +144,132 @@ class Aria2cFD(ExternalFD):
|
|||||||
cmd += ['--out', os.path.basename(tmpfilename)]
|
cmd += ['--out', os.path.basename(tmpfilename)]
|
||||||
for key, val in info_dict['http_headers'].items():
|
for key, val in info_dict['http_headers'].items():
|
||||||
cmd += ['--header', '%s: %s' % (key, val)]
|
cmd += ['--header', '%s: %s' % (key, val)]
|
||||||
cmd += self._source_address('--interface')
|
cmd += self._option('--interface', 'source_address')
|
||||||
|
cmd += self._option('--all-proxy', 'proxy')
|
||||||
|
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
|
||||||
cmd += ['--', info_dict['url']]
|
cmd += ['--', info_dict['url']]
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
|
class HttpieFD(ExternalFD):
|
||||||
|
@classmethod
|
||||||
|
def available(cls):
|
||||||
|
return check_executable('http', ['--version'])
|
||||||
|
|
||||||
|
def _make_cmd(self, tmpfilename, info_dict):
|
||||||
|
cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
|
||||||
|
for key, val in info_dict['http_headers'].items():
|
||||||
|
cmd += ['%s:%s' % (key, val)]
|
||||||
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
|
class FFmpegFD(ExternalFD):
|
||||||
|
@classmethod
|
||||||
|
def supports(cls, info_dict):
|
||||||
|
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms')
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def available(cls):
|
||||||
|
return FFmpegPostProcessor().available
|
||||||
|
|
||||||
|
def _call_downloader(self, tmpfilename, info_dict):
|
||||||
|
url = info_dict['url']
|
||||||
|
ffpp = FFmpegPostProcessor(downloader=self)
|
||||||
|
if not ffpp.available:
|
||||||
|
self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
|
||||||
|
return False
|
||||||
|
ffpp.check_version()
|
||||||
|
|
||||||
|
args = [ffpp.executable, '-y']
|
||||||
|
|
||||||
|
args += self._configuration_args()
|
||||||
|
|
||||||
|
# start_time = info_dict.get('start_time') or 0
|
||||||
|
# if start_time:
|
||||||
|
# args += ['-ss', compat_str(start_time)]
|
||||||
|
# end_time = info_dict.get('end_time')
|
||||||
|
# if end_time:
|
||||||
|
# args += ['-t', compat_str(end_time - start_time)]
|
||||||
|
|
||||||
|
if info_dict['http_headers'] and re.match(r'^https?://', url):
|
||||||
|
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
|
||||||
|
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
||||||
|
headers = handle_youtubedl_headers(info_dict['http_headers'])
|
||||||
|
args += [
|
||||||
|
'-headers',
|
||||||
|
''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
|
||||||
|
|
||||||
|
env = None
|
||||||
|
proxy = self.params.get('proxy')
|
||||||
|
if proxy:
|
||||||
|
if not re.match(r'^[\da-zA-Z]+://', proxy):
|
||||||
|
proxy = 'http://%s' % proxy
|
||||||
|
# Since December 2015 ffmpeg supports -http_proxy option (see
|
||||||
|
# http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
|
||||||
|
# We could switch to the following code if we are able to detect version properly
|
||||||
|
# args += ['-http_proxy', proxy]
|
||||||
|
env = os.environ.copy()
|
||||||
|
compat_setenv('HTTP_PROXY', proxy, env=env)
|
||||||
|
|
||||||
|
protocol = info_dict.get('protocol')
|
||||||
|
|
||||||
|
if protocol == 'rtmp':
|
||||||
|
player_url = info_dict.get('player_url')
|
||||||
|
page_url = info_dict.get('page_url')
|
||||||
|
app = info_dict.get('app')
|
||||||
|
play_path = info_dict.get('play_path')
|
||||||
|
tc_url = info_dict.get('tc_url')
|
||||||
|
flash_version = info_dict.get('flash_version')
|
||||||
|
live = info_dict.get('rtmp_live', False)
|
||||||
|
if player_url is not None:
|
||||||
|
args += ['-rtmp_swfverify', player_url]
|
||||||
|
if page_url is not None:
|
||||||
|
args += ['-rtmp_pageurl', page_url]
|
||||||
|
if app is not None:
|
||||||
|
args += ['-rtmp_app', app]
|
||||||
|
if play_path is not None:
|
||||||
|
args += ['-rtmp_playpath', play_path]
|
||||||
|
if tc_url is not None:
|
||||||
|
args += ['-rtmp_tcurl', tc_url]
|
||||||
|
if flash_version is not None:
|
||||||
|
args += ['-rtmp_flashver', flash_version]
|
||||||
|
if live:
|
||||||
|
args += ['-rtmp_live', 'live']
|
||||||
|
|
||||||
|
args += ['-i', url, '-c', 'copy']
|
||||||
|
if protocol in ('m3u8', 'm3u8_native'):
|
||||||
|
if self.params.get('hls_use_mpegts', False) or tmpfilename == '-':
|
||||||
|
args += ['-f', 'mpegts']
|
||||||
|
else:
|
||||||
|
args += ['-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
|
||||||
|
elif protocol == 'rtmp':
|
||||||
|
args += ['-f', 'flv']
|
||||||
|
else:
|
||||||
|
args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
|
||||||
|
|
||||||
|
args = [encodeArgument(opt) for opt in args]
|
||||||
|
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
|
||||||
|
|
||||||
|
self._debug_cmd(args)
|
||||||
|
|
||||||
|
proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
|
||||||
|
try:
|
||||||
|
retval = proc.wait()
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
# subprocces.run would send the SIGKILL signal to ffmpeg and the
|
||||||
|
# mp4 file couldn't be played, but if we ask ffmpeg to quit it
|
||||||
|
# produces a file that is playable (this is mostly useful for live
|
||||||
|
# streams). Note that Windows is not affected and produces playable
|
||||||
|
# files (see https://github.com/rg3/youtube-dl/issues/8300).
|
||||||
|
if sys.platform != 'win32':
|
||||||
|
proc.communicate(b'q')
|
||||||
|
raise
|
||||||
|
return retval
|
||||||
|
|
||||||
|
|
||||||
|
class AVconvFD(FFmpegFD):
|
||||||
|
pass
|
||||||
|
|
||||||
_BY_NAME = dict(
|
_BY_NAME = dict(
|
||||||
(klass.get_basename(), klass)
|
(klass.get_basename(), klass)
|
||||||
for name, klass in globals().items()
|
for name, klass in globals().items()
|
||||||
@@ -123,5 +284,6 @@ def list_external_downloaders():
|
|||||||
def get_external_downloader(external_downloader):
|
def get_external_downloader(external_downloader):
|
||||||
""" Given the name of the executable, see whether we support the given
|
""" Given the name of the executable, see whether we support the given
|
||||||
downloader . """
|
downloader . """
|
||||||
bn = os.path.basename(external_downloader)
|
# Drop .exe extension on Windows
|
||||||
|
bn = os.path.splitext(os.path.basename(external_downloader))[0]
|
||||||
return _BY_NAME[bn]
|
return _BY_NAME[bn]
|
||||||
|
|||||||
@@ -5,43 +5,56 @@ import io
|
|||||||
import itertools
|
import itertools
|
||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
import xml.etree.ElementTree as etree
|
|
||||||
|
|
||||||
from .common import FileDownloader
|
from .fragment import FragmentFD
|
||||||
from .http import HttpFD
|
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
|
compat_etree_fromstring,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
compat_urllib_error,
|
compat_urllib_error,
|
||||||
|
compat_urllib_parse_urlparse,
|
||||||
|
compat_struct_pack,
|
||||||
|
compat_struct_unpack,
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
struct_pack,
|
|
||||||
struct_unpack,
|
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
|
fix_xml_ampersands,
|
||||||
sanitize_open,
|
sanitize_open,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class DataTruncatedError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class FlvReader(io.BytesIO):
|
class FlvReader(io.BytesIO):
|
||||||
"""
|
"""
|
||||||
Reader for Flv files
|
Reader for Flv files
|
||||||
The file format is documented in https://www.adobe.com/devnet/f4v.html
|
The file format is documented in https://www.adobe.com/devnet/f4v.html
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
def read_bytes(self, n):
|
||||||
|
data = self.read(n)
|
||||||
|
if len(data) < n:
|
||||||
|
raise DataTruncatedError(
|
||||||
|
'FlvReader error: need %d bytes while only %d bytes got' % (
|
||||||
|
n, len(data)))
|
||||||
|
return data
|
||||||
|
|
||||||
# Utility functions for reading numbers and strings
|
# Utility functions for reading numbers and strings
|
||||||
def read_unsigned_long_long(self):
|
def read_unsigned_long_long(self):
|
||||||
return struct_unpack('!Q', self.read(8))[0]
|
return compat_struct_unpack('!Q', self.read_bytes(8))[0]
|
||||||
|
|
||||||
def read_unsigned_int(self):
|
def read_unsigned_int(self):
|
||||||
return struct_unpack('!I', self.read(4))[0]
|
return compat_struct_unpack('!I', self.read_bytes(4))[0]
|
||||||
|
|
||||||
def read_unsigned_char(self):
|
def read_unsigned_char(self):
|
||||||
return struct_unpack('!B', self.read(1))[0]
|
return compat_struct_unpack('!B', self.read_bytes(1))[0]
|
||||||
|
|
||||||
def read_string(self):
|
def read_string(self):
|
||||||
res = b''
|
res = b''
|
||||||
while True:
|
while True:
|
||||||
char = self.read(1)
|
char = self.read_bytes(1)
|
||||||
if char == b'\x00':
|
if char == b'\x00':
|
||||||
break
|
break
|
||||||
res += char
|
res += char
|
||||||
@@ -52,18 +65,18 @@ class FlvReader(io.BytesIO):
|
|||||||
Read a box and return the info as a tuple: (box_size, box_type, box_data)
|
Read a box and return the info as a tuple: (box_size, box_type, box_data)
|
||||||
"""
|
"""
|
||||||
real_size = size = self.read_unsigned_int()
|
real_size = size = self.read_unsigned_int()
|
||||||
box_type = self.read(4)
|
box_type = self.read_bytes(4)
|
||||||
header_end = 8
|
header_end = 8
|
||||||
if size == 1:
|
if size == 1:
|
||||||
real_size = self.read_unsigned_long_long()
|
real_size = self.read_unsigned_long_long()
|
||||||
header_end = 16
|
header_end = 16
|
||||||
return real_size, box_type, self.read(real_size - header_end)
|
return real_size, box_type, self.read_bytes(real_size - header_end)
|
||||||
|
|
||||||
def read_asrt(self):
|
def read_asrt(self):
|
||||||
# version
|
# version
|
||||||
self.read_unsigned_char()
|
self.read_unsigned_char()
|
||||||
# flags
|
# flags
|
||||||
self.read(3)
|
self.read_bytes(3)
|
||||||
quality_entry_count = self.read_unsigned_char()
|
quality_entry_count = self.read_unsigned_char()
|
||||||
# QualityEntryCount
|
# QualityEntryCount
|
||||||
for i in range(quality_entry_count):
|
for i in range(quality_entry_count):
|
||||||
@@ -84,7 +97,7 @@ class FlvReader(io.BytesIO):
|
|||||||
# version
|
# version
|
||||||
self.read_unsigned_char()
|
self.read_unsigned_char()
|
||||||
# flags
|
# flags
|
||||||
self.read(3)
|
self.read_bytes(3)
|
||||||
# time scale
|
# time scale
|
||||||
self.read_unsigned_int()
|
self.read_unsigned_int()
|
||||||
|
|
||||||
@@ -118,7 +131,7 @@ class FlvReader(io.BytesIO):
|
|||||||
# version
|
# version
|
||||||
self.read_unsigned_char()
|
self.read_unsigned_char()
|
||||||
# flags
|
# flags
|
||||||
self.read(3)
|
self.read_bytes(3)
|
||||||
|
|
||||||
self.read_unsigned_int() # BootstrapinfoVersion
|
self.read_unsigned_int() # BootstrapinfoVersion
|
||||||
# Profile,Live,Update,Reserved
|
# Profile,Live,Update,Reserved
|
||||||
@@ -193,11 +206,11 @@ def build_fragments_list(boot_info):
|
|||||||
|
|
||||||
|
|
||||||
def write_unsigned_int(stream, val):
|
def write_unsigned_int(stream, val):
|
||||||
stream.write(struct_pack('!I', val))
|
stream.write(compat_struct_pack('!I', val))
|
||||||
|
|
||||||
|
|
||||||
def write_unsigned_int_24(stream, val):
|
def write_unsigned_int_24(stream, val):
|
||||||
stream.write(struct_pack('!I', val)[1:])
|
stream.write(compat_struct_pack('!I', val)[1:])
|
||||||
|
|
||||||
|
|
||||||
def write_flv_header(stream):
|
def write_flv_header(stream):
|
||||||
@@ -222,20 +235,23 @@ def write_metadata_tag(stream, metadata):
|
|||||||
write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
|
write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
|
||||||
|
|
||||||
|
|
||||||
|
def remove_encrypted_media(media):
|
||||||
|
return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
|
||||||
|
'drmAdditionalHeaderSetId' not in e.attrib,
|
||||||
|
media))
|
||||||
|
|
||||||
|
|
||||||
def _add_ns(prop):
|
def _add_ns(prop):
|
||||||
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
|
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
|
||||||
|
|
||||||
|
|
||||||
class HttpQuietDownloader(HttpFD):
|
class F4mFD(FragmentFD):
|
||||||
def to_screen(self, *args, **kargs):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class F4mFD(FileDownloader):
|
|
||||||
"""
|
"""
|
||||||
A downloader for f4m manifests or AdobeHDS.
|
A downloader for f4m manifests or AdobeHDS.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
FD_NAME = 'f4m'
|
||||||
|
|
||||||
def _get_unencrypted_media(self, doc):
|
def _get_unencrypted_media(self, doc):
|
||||||
media = doc.findall(_add_ns('media'))
|
media = doc.findall(_add_ns('media'))
|
||||||
if not media:
|
if not media:
|
||||||
@@ -246,9 +262,7 @@ class F4mFD(FileDownloader):
|
|||||||
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
|
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
|
||||||
if 'id' not in e.attrib:
|
if 'id' not in e.attrib:
|
||||||
self.report_error('Missing ID in f4m DRM')
|
self.report_error('Missing ID in f4m DRM')
|
||||||
media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
|
media = remove_encrypted_media(media)
|
||||||
'drmAdditionalHeaderSetId' not in e.attrib,
|
|
||||||
media))
|
|
||||||
if not media:
|
if not media:
|
||||||
self.report_error('Unsupported DRM')
|
self.report_error('Unsupported DRM')
|
||||||
return media
|
return media
|
||||||
@@ -275,26 +289,37 @@ class F4mFD(FileDownloader):
|
|||||||
return fragments_list
|
return fragments_list
|
||||||
|
|
||||||
def _parse_bootstrap_node(self, node, base_url):
|
def _parse_bootstrap_node(self, node, base_url):
|
||||||
if node.text is None:
|
# Sometimes non empty inline bootstrap info can be specified along
|
||||||
|
# with bootstrap url attribute (e.g. dummy inline bootstrap info
|
||||||
|
# contains whitespace characters in [1]). We will prefer bootstrap
|
||||||
|
# url over inline bootstrap info when present.
|
||||||
|
# 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
|
||||||
|
bootstrap_url = node.get('url')
|
||||||
|
if bootstrap_url:
|
||||||
bootstrap_url = compat_urlparse.urljoin(
|
bootstrap_url = compat_urlparse.urljoin(
|
||||||
base_url, node.attrib['url'])
|
base_url, bootstrap_url)
|
||||||
boot_info = self._get_bootstrap_from_url(bootstrap_url)
|
boot_info = self._get_bootstrap_from_url(bootstrap_url)
|
||||||
else:
|
else:
|
||||||
bootstrap_url = None
|
bootstrap_url = None
|
||||||
bootstrap = base64.b64decode(node.text.encode('ascii'))
|
bootstrap = base64.b64decode(node.text.encode('ascii'))
|
||||||
boot_info = read_bootstrap_info(bootstrap)
|
boot_info = read_bootstrap_info(bootstrap)
|
||||||
return (boot_info, bootstrap_url)
|
return boot_info, bootstrap_url
|
||||||
|
|
||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
man_url = info_dict['url']
|
man_url = info_dict['url']
|
||||||
requested_bitrate = info_dict.get('tbr')
|
requested_bitrate = info_dict.get('tbr')
|
||||||
self.to_screen('[download] Downloading f4m manifest')
|
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
|
||||||
manifest = self.ydl.urlopen(man_url).read()
|
urlh = self.ydl.urlopen(man_url)
|
||||||
|
man_url = urlh.geturl()
|
||||||
|
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
|
||||||
|
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
|
||||||
|
# and https://github.com/rg3/youtube-dl/issues/7823)
|
||||||
|
manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
|
||||||
|
|
||||||
doc = etree.fromstring(manifest)
|
doc = compat_etree_fromstring(manifest)
|
||||||
formats = [(int(f.attrib.get('bitrate', -1)), f)
|
formats = [(int(f.attrib.get('bitrate', -1)), f)
|
||||||
for f in self._get_unencrypted_media(doc)]
|
for f in self._get_unencrypted_media(doc)]
|
||||||
if requested_bitrate is None:
|
if requested_bitrate is None or len(formats) == 1:
|
||||||
# get the best format
|
# get the best format
|
||||||
formats = sorted(formats, key=lambda f: f[0])
|
formats = sorted(formats, key=lambda f: f[0])
|
||||||
rate, media = formats[-1]
|
rate, media = formats[-1]
|
||||||
@@ -313,101 +338,72 @@ class F4mFD(FileDownloader):
|
|||||||
metadata = None
|
metadata = None
|
||||||
|
|
||||||
fragments_list = build_fragments_list(boot_info)
|
fragments_list = build_fragments_list(boot_info)
|
||||||
if self.params.get('test', False):
|
test = self.params.get('test', False)
|
||||||
|
if test:
|
||||||
# We only download the first fragment
|
# We only download the first fragment
|
||||||
fragments_list = fragments_list[:1]
|
fragments_list = fragments_list[:1]
|
||||||
total_frags = len(fragments_list)
|
total_frags = len(fragments_list)
|
||||||
# For some akamai manifests we'll need to add a query to the fragment url
|
# For some akamai manifests we'll need to add a query to the fragment url
|
||||||
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
|
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
|
||||||
|
|
||||||
self.report_destination(filename)
|
ctx = {
|
||||||
http_dl = HttpQuietDownloader(
|
'filename': filename,
|
||||||
self.ydl,
|
'total_frags': total_frags,
|
||||||
{
|
'live': live,
|
||||||
'continuedl': True,
|
}
|
||||||
'quiet': True,
|
|
||||||
'noprogress': True,
|
self._prepare_frag_download(ctx)
|
||||||
'ratelimit': self.params.get('ratelimit', None),
|
|
||||||
'test': self.params.get('test', False),
|
dest_stream = ctx['dest_stream']
|
||||||
}
|
|
||||||
)
|
|
||||||
tmpfilename = self.temp_name(filename)
|
|
||||||
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
|
|
||||||
|
|
||||||
write_flv_header(dest_stream)
|
write_flv_header(dest_stream)
|
||||||
if not live:
|
if not live:
|
||||||
write_metadata_tag(dest_stream, metadata)
|
write_metadata_tag(dest_stream, metadata)
|
||||||
|
|
||||||
# This dict stores the download progress, it's updated by the progress
|
base_url_parsed = compat_urllib_parse_urlparse(base_url)
|
||||||
# hook
|
|
||||||
state = {
|
|
||||||
'status': 'downloading',
|
|
||||||
'downloaded_bytes': 0,
|
|
||||||
'frag_index': 0,
|
|
||||||
'frag_count': total_frags,
|
|
||||||
'filename': filename,
|
|
||||||
'tmpfilename': tmpfilename,
|
|
||||||
}
|
|
||||||
start = time.time()
|
|
||||||
|
|
||||||
def frag_progress_hook(s):
|
self._start_frag_download(ctx)
|
||||||
if s['status'] not in ('downloading', 'finished'):
|
|
||||||
return
|
|
||||||
|
|
||||||
frag_total_bytes = s.get('total_bytes', 0)
|
|
||||||
if s['status'] == 'finished':
|
|
||||||
state['downloaded_bytes'] += frag_total_bytes
|
|
||||||
state['frag_index'] += 1
|
|
||||||
|
|
||||||
estimated_size = (
|
|
||||||
(state['downloaded_bytes'] + frag_total_bytes) /
|
|
||||||
(state['frag_index'] + 1) * total_frags)
|
|
||||||
time_now = time.time()
|
|
||||||
state['total_bytes_estimate'] = estimated_size
|
|
||||||
state['elapsed'] = time_now - start
|
|
||||||
|
|
||||||
if s['status'] == 'finished':
|
|
||||||
progress = self.calc_percent(state['frag_index'], total_frags)
|
|
||||||
else:
|
|
||||||
frag_downloaded_bytes = s['downloaded_bytes']
|
|
||||||
frag_progress = self.calc_percent(frag_downloaded_bytes,
|
|
||||||
frag_total_bytes)
|
|
||||||
progress = self.calc_percent(state['frag_index'], total_frags)
|
|
||||||
progress += frag_progress / float(total_frags)
|
|
||||||
|
|
||||||
state['eta'] = self.calc_eta(
|
|
||||||
start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
|
|
||||||
state['speed'] = s.get('speed')
|
|
||||||
self._hook_progress(state)
|
|
||||||
|
|
||||||
http_dl.add_progress_hook(frag_progress_hook)
|
|
||||||
|
|
||||||
frags_filenames = []
|
frags_filenames = []
|
||||||
while fragments_list:
|
while fragments_list:
|
||||||
seg_i, frag_i = fragments_list.pop(0)
|
seg_i, frag_i = fragments_list.pop(0)
|
||||||
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
|
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
|
||||||
url = base_url + name
|
query = []
|
||||||
|
if base_url_parsed.query:
|
||||||
|
query.append(base_url_parsed.query)
|
||||||
if akamai_pv:
|
if akamai_pv:
|
||||||
url += '?' + akamai_pv.strip(';')
|
query.append(akamai_pv.strip(';'))
|
||||||
if info_dict.get('extra_param_to_segment_url'):
|
if info_dict.get('extra_param_to_segment_url'):
|
||||||
url += info_dict.get('extra_param_to_segment_url')
|
query.append(info_dict['extra_param_to_segment_url'])
|
||||||
frag_filename = '%s-%s' % (tmpfilename, name)
|
url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
|
||||||
|
frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
|
||||||
try:
|
try:
|
||||||
success = http_dl.download(frag_filename, {'url': url})
|
success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
|
||||||
if not success:
|
if not success:
|
||||||
return False
|
return False
|
||||||
with open(frag_filename, 'rb') as down:
|
(down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
|
||||||
down_data = down.read()
|
down_data = down.read()
|
||||||
reader = FlvReader(down_data)
|
down.close()
|
||||||
while True:
|
reader = FlvReader(down_data)
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
_, box_type, box_data = reader.read_box_info()
|
_, box_type, box_data = reader.read_box_info()
|
||||||
if box_type == b'mdat':
|
except DataTruncatedError:
|
||||||
dest_stream.write(box_data)
|
if test:
|
||||||
|
# In tests, segments may be truncated, and thus
|
||||||
|
# FlvReader may not be able to parse the whole
|
||||||
|
# chunk. If so, write the segment as is
|
||||||
|
# See https://github.com/rg3/youtube-dl/issues/9214
|
||||||
|
dest_stream.write(down_data)
|
||||||
break
|
break
|
||||||
|
raise
|
||||||
|
if box_type == b'mdat':
|
||||||
|
dest_stream.write(box_data)
|
||||||
|
break
|
||||||
if live:
|
if live:
|
||||||
os.remove(frag_filename)
|
os.remove(encodeFilename(frag_sanitized))
|
||||||
else:
|
else:
|
||||||
frags_filenames.append(frag_filename)
|
frags_filenames.append(frag_sanitized)
|
||||||
except (compat_urllib_error.HTTPError, ) as err:
|
except (compat_urllib_error.HTTPError, ) as err:
|
||||||
if live and (err.code == 404 or err.code == 410):
|
if live and (err.code == 404 or err.code == 410):
|
||||||
# We didn't keep up with the live window. Continue
|
# We didn't keep up with the live window. Continue
|
||||||
@@ -418,27 +414,16 @@ class F4mFD(FileDownloader):
|
|||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
if not fragments_list and live and bootstrap_url:
|
if not fragments_list and not test and live and bootstrap_url:
|
||||||
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
|
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
|
||||||
total_frags += len(fragments_list)
|
total_frags += len(fragments_list)
|
||||||
if fragments_list and (fragments_list[0][1] > frag_i + 1):
|
if fragments_list and (fragments_list[0][1] > frag_i + 1):
|
||||||
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
|
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
|
||||||
self.report_warning(msg)
|
self.report_warning(msg)
|
||||||
|
|
||||||
dest_stream.close()
|
self._finish_frag_download(ctx)
|
||||||
|
|
||||||
elapsed = time.time() - start
|
|
||||||
self.try_rename(tmpfilename, filename)
|
|
||||||
for frag_file in frags_filenames:
|
for frag_file in frags_filenames:
|
||||||
os.remove(frag_file)
|
os.remove(encodeFilename(frag_file))
|
||||||
|
|
||||||
fsize = os.path.getsize(encodeFilename(filename))
|
|
||||||
self._hook_progress({
|
|
||||||
'downloaded_bytes': fsize,
|
|
||||||
'total_bytes': fsize,
|
|
||||||
'filename': filename,
|
|
||||||
'status': 'finished',
|
|
||||||
'elapsed': elapsed,
|
|
||||||
})
|
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|||||||
132
youtube_dl/downloader/fragment.py
Normal file
132
youtube_dl/downloader/fragment.py
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
from __future__ import division, unicode_literals
|
||||||
|
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
|
||||||
|
from .common import FileDownloader
|
||||||
|
from .http import HttpFD
|
||||||
|
from ..utils import (
|
||||||
|
encodeFilename,
|
||||||
|
sanitize_open,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class HttpQuietDownloader(HttpFD):
|
||||||
|
def to_screen(self, *args, **kargs):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class FragmentFD(FileDownloader):
|
||||||
|
"""
|
||||||
|
A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
|
||||||
|
|
||||||
|
Available options:
|
||||||
|
|
||||||
|
fragment_retries: Number of times to retry a fragment for HTTP error (DASH only)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def report_retry_fragment(self, fragment_name, count, retries):
|
||||||
|
self.to_screen(
|
||||||
|
'[download] Got server HTTP error. Retrying fragment %s (attempt %d of %s)...'
|
||||||
|
% (fragment_name, count, self.format_retries(retries)))
|
||||||
|
|
||||||
|
def _prepare_and_start_frag_download(self, ctx):
|
||||||
|
self._prepare_frag_download(ctx)
|
||||||
|
self._start_frag_download(ctx)
|
||||||
|
|
||||||
|
def _prepare_frag_download(self, ctx):
|
||||||
|
if 'live' not in ctx:
|
||||||
|
ctx['live'] = False
|
||||||
|
self.to_screen(
|
||||||
|
'[%s] Total fragments: %s'
|
||||||
|
% (self.FD_NAME, ctx['total_frags'] if not ctx['live'] else 'unknown (live)'))
|
||||||
|
self.report_destination(ctx['filename'])
|
||||||
|
dl = HttpQuietDownloader(
|
||||||
|
self.ydl,
|
||||||
|
{
|
||||||
|
'continuedl': True,
|
||||||
|
'quiet': True,
|
||||||
|
'noprogress': True,
|
||||||
|
'ratelimit': self.params.get('ratelimit'),
|
||||||
|
'retries': self.params.get('retries', 0),
|
||||||
|
'test': self.params.get('test', False),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
tmpfilename = self.temp_name(ctx['filename'])
|
||||||
|
dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb')
|
||||||
|
ctx.update({
|
||||||
|
'dl': dl,
|
||||||
|
'dest_stream': dest_stream,
|
||||||
|
'tmpfilename': tmpfilename,
|
||||||
|
})
|
||||||
|
|
||||||
|
def _start_frag_download(self, ctx):
|
||||||
|
total_frags = ctx['total_frags']
|
||||||
|
# This dict stores the download progress, it's updated by the progress
|
||||||
|
# hook
|
||||||
|
state = {
|
||||||
|
'status': 'downloading',
|
||||||
|
'downloaded_bytes': 0,
|
||||||
|
'frag_index': 0,
|
||||||
|
'frag_count': total_frags,
|
||||||
|
'filename': ctx['filename'],
|
||||||
|
'tmpfilename': ctx['tmpfilename'],
|
||||||
|
}
|
||||||
|
|
||||||
|
start = time.time()
|
||||||
|
ctx.update({
|
||||||
|
'started': start,
|
||||||
|
# Total complete fragments downloaded so far in bytes
|
||||||
|
'complete_frags_downloaded_bytes': 0,
|
||||||
|
# Amount of fragment's bytes downloaded by the time of the previous
|
||||||
|
# frag progress hook invocation
|
||||||
|
'prev_frag_downloaded_bytes': 0,
|
||||||
|
})
|
||||||
|
|
||||||
|
def frag_progress_hook(s):
|
||||||
|
if s['status'] not in ('downloading', 'finished'):
|
||||||
|
return
|
||||||
|
|
||||||
|
time_now = time.time()
|
||||||
|
state['elapsed'] = time_now - start
|
||||||
|
frag_total_bytes = s.get('total_bytes') or 0
|
||||||
|
if not ctx['live']:
|
||||||
|
estimated_size = (
|
||||||
|
(ctx['complete_frags_downloaded_bytes'] + frag_total_bytes) /
|
||||||
|
(state['frag_index'] + 1) * total_frags)
|
||||||
|
state['total_bytes_estimate'] = estimated_size
|
||||||
|
|
||||||
|
if s['status'] == 'finished':
|
||||||
|
state['frag_index'] += 1
|
||||||
|
state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
|
||||||
|
ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
|
||||||
|
ctx['prev_frag_downloaded_bytes'] = 0
|
||||||
|
else:
|
||||||
|
frag_downloaded_bytes = s['downloaded_bytes']
|
||||||
|
state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
|
||||||
|
if not ctx['live']:
|
||||||
|
state['eta'] = self.calc_eta(
|
||||||
|
start, time_now, estimated_size,
|
||||||
|
state['downloaded_bytes'])
|
||||||
|
state['speed'] = s.get('speed') or ctx.get('speed')
|
||||||
|
ctx['speed'] = state['speed']
|
||||||
|
ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
|
||||||
|
self._hook_progress(state)
|
||||||
|
|
||||||
|
ctx['dl'].add_progress_hook(frag_progress_hook)
|
||||||
|
|
||||||
|
return start
|
||||||
|
|
||||||
|
def _finish_frag_download(self, ctx):
|
||||||
|
ctx['dest_stream'].close()
|
||||||
|
elapsed = time.time() - ctx['started']
|
||||||
|
self.try_rename(ctx['tmpfilename'], ctx['filename'])
|
||||||
|
fsize = os.path.getsize(encodeFilename(ctx['filename']))
|
||||||
|
|
||||||
|
self._hook_progress({
|
||||||
|
'downloaded_bytes': fsize,
|
||||||
|
'total_bytes': fsize,
|
||||||
|
'filename': ctx['filename'],
|
||||||
|
'status': 'finished',
|
||||||
|
'elapsed': elapsed,
|
||||||
|
})
|
||||||
@@ -1,104 +1,90 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import os
|
import os.path
|
||||||
import re
|
import re
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from ..postprocessor.ffmpeg import FFmpegPostProcessor
|
from .fragment import FragmentFD
|
||||||
from .common import FileDownloader
|
from .external import FFmpegFD
|
||||||
from ..compat import (
|
|
||||||
compat_urlparse,
|
from ..compat import compat_urlparse
|
||||||
compat_urllib_request,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
encodeArgument,
|
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
|
sanitize_open,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class HlsFD(FileDownloader):
|
class HlsFD(FragmentFD):
|
||||||
def real_download(self, filename, info_dict):
|
""" A limited implementation that does not require ffmpeg """
|
||||||
url = info_dict['url']
|
|
||||||
self.report_destination(filename)
|
|
||||||
tmpfilename = self.temp_name(filename)
|
|
||||||
|
|
||||||
ffpp = FFmpegPostProcessor(downloader=self)
|
FD_NAME = 'hlsnative'
|
||||||
if not ffpp.available:
|
|
||||||
self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
|
|
||||||
return False
|
|
||||||
ffpp.check_version()
|
|
||||||
|
|
||||||
args = [
|
@staticmethod
|
||||||
encodeArgument(opt)
|
def can_download(manifest):
|
||||||
for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
|
UNSUPPORTED_FEATURES = (
|
||||||
args.append(encodeFilename(tmpfilename, True))
|
r'#EXT-X-KEY:METHOD=(?!NONE)', # encrypted streams [1]
|
||||||
|
r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
||||||
retval = subprocess.call(args)
|
# Live streams heuristic does not always work (e.g. geo restricted to Germany
|
||||||
if retval == 0:
|
# http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)
|
||||||
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
# r'#EXT-X-MEDIA-SEQUENCE:(?!0$)', # live streams [3]
|
||||||
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
|
r'#EXT-X-PLAYLIST-TYPE:EVENT', # media segments may be appended to the end of
|
||||||
self.try_rename(tmpfilename, filename)
|
# event media playlists [4]
|
||||||
self._hook_progress({
|
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
|
||||||
'downloaded_bytes': fsize,
|
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
|
||||||
'total_bytes': fsize,
|
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
|
||||||
'filename': filename,
|
# 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
|
||||||
'status': 'finished',
|
)
|
||||||
})
|
return all(not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES)
|
||||||
return True
|
|
||||||
else:
|
|
||||||
self.to_stderr('\n')
|
|
||||||
self.report_error('%s exited with code %d' % (ffpp.basename, retval))
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class NativeHlsFD(FileDownloader):
|
|
||||||
""" A more limited implementation that does not require ffmpeg """
|
|
||||||
|
|
||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
url = info_dict['url']
|
man_url = info_dict['url']
|
||||||
self.report_destination(filename)
|
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
|
||||||
tmpfilename = self.temp_name(filename)
|
manifest = self.ydl.urlopen(man_url).read()
|
||||||
|
|
||||||
self.to_screen(
|
s = manifest.decode('utf-8', 'ignore')
|
||||||
'[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
|
|
||||||
data = self.ydl.urlopen(url).read()
|
if not self.can_download(s):
|
||||||
s = data.decode('utf-8', 'ignore')
|
self.report_warning(
|
||||||
segment_urls = []
|
'hlsnative has detected features it does not support, '
|
||||||
|
'extraction will be delegated to ffmpeg')
|
||||||
|
fd = FFmpegFD(self.ydl, self.params)
|
||||||
|
for ph in self._progress_hooks:
|
||||||
|
fd.add_progress_hook(ph)
|
||||||
|
return fd.real_download(filename, info_dict)
|
||||||
|
|
||||||
|
fragment_urls = []
|
||||||
for line in s.splitlines():
|
for line in s.splitlines():
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
if line and not line.startswith('#'):
|
if line and not line.startswith('#'):
|
||||||
segment_url = (
|
segment_url = (
|
||||||
line
|
line
|
||||||
if re.match(r'^https?://', line)
|
if re.match(r'^https?://', line)
|
||||||
else compat_urlparse.urljoin(url, line))
|
else compat_urlparse.urljoin(man_url, line))
|
||||||
segment_urls.append(segment_url)
|
fragment_urls.append(segment_url)
|
||||||
|
# We only download the first fragment during the test
|
||||||
is_test = self.params.get('test', False)
|
if self.params.get('test', False):
|
||||||
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
|
|
||||||
byte_counter = 0
|
|
||||||
with open(tmpfilename, 'wb') as outf:
|
|
||||||
for i, segurl in enumerate(segment_urls):
|
|
||||||
self.to_screen(
|
|
||||||
'[hlsnative] %s: Downloading segment %d / %d' %
|
|
||||||
(info_dict['id'], i + 1, len(segment_urls)))
|
|
||||||
seg_req = compat_urllib_request.Request(segurl)
|
|
||||||
if remaining_bytes is not None:
|
|
||||||
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
|
|
||||||
|
|
||||||
segment = self.ydl.urlopen(seg_req).read()
|
|
||||||
if remaining_bytes is not None:
|
|
||||||
segment = segment[:remaining_bytes]
|
|
||||||
remaining_bytes -= len(segment)
|
|
||||||
outf.write(segment)
|
|
||||||
byte_counter += len(segment)
|
|
||||||
if remaining_bytes is not None and remaining_bytes <= 0:
|
|
||||||
break
|
break
|
||||||
|
|
||||||
self._hook_progress({
|
ctx = {
|
||||||
'downloaded_bytes': byte_counter,
|
|
||||||
'total_bytes': byte_counter,
|
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
'status': 'finished',
|
'total_frags': len(fragment_urls),
|
||||||
})
|
}
|
||||||
self.try_rename(tmpfilename, filename)
|
|
||||||
|
self._prepare_and_start_frag_download(ctx)
|
||||||
|
|
||||||
|
frags_filenames = []
|
||||||
|
for i, frag_url in enumerate(fragment_urls):
|
||||||
|
frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
|
||||||
|
success = ctx['dl'].download(frag_filename, {'url': frag_url})
|
||||||
|
if not success:
|
||||||
|
return False
|
||||||
|
down, frag_sanitized = sanitize_open(frag_filename, 'rb')
|
||||||
|
ctx['dest_stream'].write(down.read())
|
||||||
|
down.close()
|
||||||
|
frags_filenames.append(frag_sanitized)
|
||||||
|
|
||||||
|
self._finish_frag_download(ctx)
|
||||||
|
|
||||||
|
for frag_file in frags_filenames:
|
||||||
|
os.remove(encodeFilename(frag_file))
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|||||||
@@ -4,16 +4,15 @@ import errno
|
|||||||
import os
|
import os
|
||||||
import socket
|
import socket
|
||||||
import time
|
import time
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from ..compat import (
|
from ..compat import compat_urllib_error
|
||||||
compat_urllib_request,
|
|
||||||
compat_urllib_error,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ContentTooShortError,
|
ContentTooShortError,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
sanitize_open,
|
sanitize_open,
|
||||||
|
sanitized_Request,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -28,8 +27,8 @@ class HttpFD(FileDownloader):
|
|||||||
add_headers = info_dict.get('http_headers')
|
add_headers = info_dict.get('http_headers')
|
||||||
if add_headers:
|
if add_headers:
|
||||||
headers.update(add_headers)
|
headers.update(add_headers)
|
||||||
basic_request = compat_urllib_request.Request(url, None, headers)
|
basic_request = sanitized_Request(url, None, headers)
|
||||||
request = compat_urllib_request.Request(url, None, headers)
|
request = sanitized_Request(url, None, headers)
|
||||||
|
|
||||||
is_test = self.params.get('test', False)
|
is_test = self.params.get('test', False)
|
||||||
|
|
||||||
@@ -57,6 +56,24 @@ class HttpFD(FileDownloader):
|
|||||||
# Establish connection
|
# Establish connection
|
||||||
try:
|
try:
|
||||||
data = self.ydl.urlopen(request)
|
data = self.ydl.urlopen(request)
|
||||||
|
# When trying to resume, Content-Range HTTP header of response has to be checked
|
||||||
|
# to match the value of requested Range HTTP header. This is due to a webservers
|
||||||
|
# that don't support resuming and serve a whole file with no Content-Range
|
||||||
|
# set in response despite of requested Range (see
|
||||||
|
# https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
|
||||||
|
if resume_len > 0:
|
||||||
|
content_range = data.headers.get('Content-Range')
|
||||||
|
if content_range:
|
||||||
|
content_range_m = re.search(r'bytes (\d+)-', content_range)
|
||||||
|
# Content-Range is present and matches requested Range, resume is possible
|
||||||
|
if content_range_m and resume_len == int(content_range_m.group(1)):
|
||||||
|
break
|
||||||
|
# Content-Range is either not present or invalid. Assuming remote webserver is
|
||||||
|
# trying to send the whole file, resume is not possible, so wiping the local file
|
||||||
|
# and performing entire redownload
|
||||||
|
self.report_unable_to_resume()
|
||||||
|
resume_len = 0
|
||||||
|
open_mode = 'wb'
|
||||||
break
|
break
|
||||||
except (compat_urllib_error.HTTPError, ) as err:
|
except (compat_urllib_error.HTTPError, ) as err:
|
||||||
if (err.code < 500 or err.code >= 600) and err.code != 416:
|
if (err.code < 500 or err.code >= 600) and err.code != 416:
|
||||||
@@ -123,8 +140,8 @@ class HttpFD(FileDownloader):
|
|||||||
|
|
||||||
if data_len is not None:
|
if data_len is not None:
|
||||||
data_len = int(data_len) + resume_len
|
data_len = int(data_len) + resume_len
|
||||||
min_data_len = self.params.get("min_filesize", None)
|
min_data_len = self.params.get('min_filesize')
|
||||||
max_data_len = self.params.get("max_filesize", None)
|
max_data_len = self.params.get('max_filesize')
|
||||||
if min_data_len is not None and data_len < min_data_len:
|
if min_data_len is not None and data_len < min_data_len:
|
||||||
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
|
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
|
||||||
return False
|
return False
|
||||||
|
|||||||
@@ -94,18 +94,18 @@ class RtmpFD(FileDownloader):
|
|||||||
return proc.returncode
|
return proc.returncode
|
||||||
|
|
||||||
url = info_dict['url']
|
url = info_dict['url']
|
||||||
player_url = info_dict.get('player_url', None)
|
player_url = info_dict.get('player_url')
|
||||||
page_url = info_dict.get('page_url', None)
|
page_url = info_dict.get('page_url')
|
||||||
app = info_dict.get('app', None)
|
app = info_dict.get('app')
|
||||||
play_path = info_dict.get('play_path', None)
|
play_path = info_dict.get('play_path')
|
||||||
tc_url = info_dict.get('tc_url', None)
|
tc_url = info_dict.get('tc_url')
|
||||||
flash_version = info_dict.get('flash_version', None)
|
flash_version = info_dict.get('flash_version')
|
||||||
live = info_dict.get('rtmp_live', False)
|
live = info_dict.get('rtmp_live', False)
|
||||||
conn = info_dict.get('rtmp_conn', None)
|
conn = info_dict.get('rtmp_conn')
|
||||||
protocol = info_dict.get('rtmp_protocol', None)
|
protocol = info_dict.get('rtmp_protocol')
|
||||||
real_time = info_dict.get('rtmp_real_time', False)
|
real_time = info_dict.get('rtmp_real_time', False)
|
||||||
no_resume = info_dict.get('no_resume', False)
|
no_resume = info_dict.get('no_resume', False)
|
||||||
continue_dl = info_dict.get('continuedl', True)
|
continue_dl = self.params.get('continuedl', True)
|
||||||
|
|
||||||
self.report_destination(filename)
|
self.report_destination(filename)
|
||||||
tmpfilename = self.temp_name(filename)
|
tmpfilename = self.temp_name(filename)
|
||||||
@@ -117,7 +117,7 @@ class RtmpFD(FileDownloader):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
# Download using rtmpdump. rtmpdump returns exit code 2 when
|
# Download using rtmpdump. rtmpdump returns exit code 2 when
|
||||||
# the connection was interrumpted and resuming appears to be
|
# the connection was interrupted and resuming appears to be
|
||||||
# possible. This is part of rtmpdump's normal usage, AFAIK.
|
# possible. This is part of rtmpdump's normal usage, AFAIK.
|
||||||
basic_args = [
|
basic_args = [
|
||||||
'rtmpdump', '--verbose', '-r', url,
|
'rtmpdump', '--verbose', '-r', url,
|
||||||
|
|||||||
@@ -27,6 +27,8 @@ class RtspFD(FileDownloader):
|
|||||||
self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install any.')
|
self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install any.')
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
self._debug_cmd(args)
|
||||||
|
|
||||||
retval = subprocess.call(args)
|
retval = subprocess.call(args)
|
||||||
if retval == 0:
|
if retval == 0:
|
||||||
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
||||||
|
|||||||
@@ -1,771 +1,33 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .abc import ABCIE
|
try:
|
||||||
from .abc7news import Abc7NewsIE
|
from .lazy_extractors import *
|
||||||
from .academicearth import AcademicEarthCourseIE
|
from .lazy_extractors import _ALL_CLASSES
|
||||||
from .addanime import AddAnimeIE
|
_LAZY_LOADER = True
|
||||||
from .adobetv import (
|
except ImportError:
|
||||||
AdobeTVIE,
|
_LAZY_LOADER = False
|
||||||
AdobeTVVideoIE,
|
from .extractors import *
|
||||||
)
|
|
||||||
from .adultswim import AdultSwimIE
|
|
||||||
from .aftenposten import AftenpostenIE
|
|
||||||
from .aftonbladet import AftonbladetIE
|
|
||||||
from .airmozilla import AirMozillaIE
|
|
||||||
from .aljazeera import AlJazeeraIE
|
|
||||||
from .alphaporno import AlphaPornoIE
|
|
||||||
from .anitube import AnitubeIE
|
|
||||||
from .anysex import AnySexIE
|
|
||||||
from .aol import AolIE
|
|
||||||
from .allocine import AllocineIE
|
|
||||||
from .aparat import AparatIE
|
|
||||||
from .appletrailers import AppleTrailersIE
|
|
||||||
from .archiveorg import ArchiveOrgIE
|
|
||||||
from .ard import ARDIE, ARDMediathekIE
|
|
||||||
from .arte import (
|
|
||||||
ArteTvIE,
|
|
||||||
ArteTVPlus7IE,
|
|
||||||
ArteTVCreativeIE,
|
|
||||||
ArteTVConcertIE,
|
|
||||||
ArteTVFutureIE,
|
|
||||||
ArteTVDDCIE,
|
|
||||||
ArteTVEmbedIE,
|
|
||||||
)
|
|
||||||
from .atresplayer import AtresPlayerIE
|
|
||||||
from .atttechchannel import ATTTechChannelIE
|
|
||||||
from .audiomack import AudiomackIE, AudiomackAlbumIE
|
|
||||||
from .azubu import AzubuIE
|
|
||||||
from .baidu import BaiduVideoIE
|
|
||||||
from .bambuser import BambuserIE, BambuserChannelIE
|
|
||||||
from .bandcamp import BandcampIE, BandcampAlbumIE
|
|
||||||
from .bbccouk import BBCCoUkIE
|
|
||||||
from .beeg import BeegIE
|
|
||||||
from .behindkink import BehindKinkIE
|
|
||||||
from .beatportpro import BeatportProIE
|
|
||||||
from .bet import BetIE
|
|
||||||
from .bild import BildIE
|
|
||||||
from .bilibili import BiliBiliIE
|
|
||||||
from .blinkx import BlinkxIE
|
|
||||||
from .bliptv import BlipTVIE, BlipTVUserIE
|
|
||||||
from .bloomberg import BloombergIE
|
|
||||||
from .bpb import BpbIE
|
|
||||||
from .br import BRIE
|
|
||||||
from .breakcom import BreakIE
|
|
||||||
from .brightcove import BrightcoveIE
|
|
||||||
from .buzzfeed import BuzzFeedIE
|
|
||||||
from .byutv import BYUtvIE
|
|
||||||
from .c56 import C56IE
|
|
||||||
from .camdemy import (
|
|
||||||
CamdemyIE,
|
|
||||||
CamdemyFolderIE
|
|
||||||
)
|
|
||||||
from .canal13cl import Canal13clIE
|
|
||||||
from .canalplus import CanalplusIE
|
|
||||||
from .canalc2 import Canalc2IE
|
|
||||||
from .cbs import CBSIE
|
|
||||||
from .cbsnews import CBSNewsIE
|
|
||||||
from .cbssports import CBSSportsIE
|
|
||||||
from .ccc import CCCIE
|
|
||||||
from .ceskatelevize import CeskaTelevizeIE
|
|
||||||
from .channel9 import Channel9IE
|
|
||||||
from .chilloutzone import ChilloutzoneIE
|
|
||||||
from .chirbit import (
|
|
||||||
ChirbitIE,
|
|
||||||
ChirbitProfileIE,
|
|
||||||
)
|
|
||||||
from .cinchcast import CinchcastIE
|
|
||||||
from .cinemassacre import CinemassacreIE
|
|
||||||
from .clipfish import ClipfishIE
|
|
||||||
from .cliphunter import CliphunterIE
|
|
||||||
from .clipsyndicate import ClipsyndicateIE
|
|
||||||
from .cloudy import CloudyIE
|
|
||||||
from .clubic import ClubicIE
|
|
||||||
from .cmt import CMTIE
|
|
||||||
from .cnet import CNETIE
|
|
||||||
from .cnn import (
|
|
||||||
CNNIE,
|
|
||||||
CNNBlogsIE,
|
|
||||||
CNNArticleIE,
|
|
||||||
)
|
|
||||||
from .collegehumor import CollegeHumorIE
|
|
||||||
from .collegerama import CollegeRamaIE
|
|
||||||
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
|
|
||||||
from .comcarcoff import ComCarCoffIE
|
|
||||||
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
|
|
||||||
from .condenast import CondeNastIE
|
|
||||||
from .cracked import CrackedIE
|
|
||||||
from .criterion import CriterionIE
|
|
||||||
from .crooksandliars import CrooksAndLiarsIE
|
|
||||||
from .crunchyroll import (
|
|
||||||
CrunchyrollIE,
|
|
||||||
CrunchyrollShowPlaylistIE
|
|
||||||
)
|
|
||||||
from .cspan import CSpanIE
|
|
||||||
from .ctsnews import CtsNewsIE
|
|
||||||
from .dailymotion import (
|
|
||||||
DailymotionIE,
|
|
||||||
DailymotionPlaylistIE,
|
|
||||||
DailymotionUserIE,
|
|
||||||
DailymotionCloudIE,
|
|
||||||
)
|
|
||||||
from .daum import DaumIE
|
|
||||||
from .dbtv import DBTVIE
|
|
||||||
from .dctp import DctpTvIE
|
|
||||||
from .deezer import DeezerPlaylistIE
|
|
||||||
from .dfb import DFBIE
|
|
||||||
from .dhm import DHMIE
|
|
||||||
from .dotsub import DotsubIE
|
|
||||||
from .douyutv import DouyuTVIE
|
|
||||||
from .dramafever import (
|
|
||||||
DramaFeverIE,
|
|
||||||
DramaFeverSeriesIE,
|
|
||||||
)
|
|
||||||
from .dreisat import DreiSatIE
|
|
||||||
from .drbonanza import DRBonanzaIE
|
|
||||||
from .drtuber import DrTuberIE
|
|
||||||
from .drtv import DRTVIE
|
|
||||||
from .dvtv import DVTVIE
|
|
||||||
from .dump import DumpIE
|
|
||||||
from .dumpert import DumpertIE
|
|
||||||
from .defense import DefenseGouvFrIE
|
|
||||||
from .discovery import DiscoveryIE
|
|
||||||
from .divxstage import DivxStageIE
|
|
||||||
from .dropbox import DropboxIE
|
|
||||||
from .eagleplatform import EaglePlatformIE
|
|
||||||
from .ebaumsworld import EbaumsWorldIE
|
|
||||||
from .echomsk import EchoMskIE
|
|
||||||
from .ehow import EHowIE
|
|
||||||
from .eighttracks import EightTracksIE
|
|
||||||
from .einthusan import EinthusanIE
|
|
||||||
from .eitb import EitbIE
|
|
||||||
from .ellentv import (
|
|
||||||
EllenTVIE,
|
|
||||||
EllenTVClipsIE,
|
|
||||||
)
|
|
||||||
from .elpais import ElPaisIE
|
|
||||||
from .embedly import EmbedlyIE
|
|
||||||
from .empflix import EMPFlixIE
|
|
||||||
from .engadget import EngadgetIE
|
|
||||||
from .eporner import EpornerIE
|
|
||||||
from .eroprofile import EroProfileIE
|
|
||||||
from .escapist import EscapistIE
|
|
||||||
from .espn import ESPNIE
|
|
||||||
from .everyonesmixtape import EveryonesMixtapeIE
|
|
||||||
from .exfm import ExfmIE
|
|
||||||
from .expotv import ExpoTVIE
|
|
||||||
from .extremetube import ExtremeTubeIE
|
|
||||||
from .facebook import FacebookIE
|
|
||||||
from .faz import FazIE
|
|
||||||
from .fc2 import FC2IE
|
|
||||||
from .firstpost import FirstpostIE
|
|
||||||
from .firsttv import FirstTVIE
|
|
||||||
from .fivemin import FiveMinIE
|
|
||||||
from .fivetv import FiveTVIE
|
|
||||||
from .fktv import (
|
|
||||||
FKTVIE,
|
|
||||||
FKTVPosteckeIE,
|
|
||||||
)
|
|
||||||
from .flickr import FlickrIE
|
|
||||||
from .folketinget import FolketingetIE
|
|
||||||
from .footyroom import FootyRoomIE
|
|
||||||
from .fourtube import FourTubeIE
|
|
||||||
from .foxgay import FoxgayIE
|
|
||||||
from .foxnews import FoxNewsIE
|
|
||||||
from .foxsports import FoxSportsIE
|
|
||||||
from .franceculture import FranceCultureIE
|
|
||||||
from .franceinter import FranceInterIE
|
|
||||||
from .francetv import (
|
|
||||||
PluzzIE,
|
|
||||||
FranceTvInfoIE,
|
|
||||||
FranceTVIE,
|
|
||||||
GenerationQuoiIE,
|
|
||||||
CultureboxIE,
|
|
||||||
)
|
|
||||||
from .freesound import FreesoundIE
|
|
||||||
from .freespeech import FreespeechIE
|
|
||||||
from .freevideo import FreeVideoIE
|
|
||||||
from .funnyordie import FunnyOrDieIE
|
|
||||||
from .gamekings import GamekingsIE
|
|
||||||
from .gameone import (
|
|
||||||
GameOneIE,
|
|
||||||
GameOnePlaylistIE,
|
|
||||||
)
|
|
||||||
from .gamersyde import GamersydeIE
|
|
||||||
from .gamespot import GameSpotIE
|
|
||||||
from .gamestar import GameStarIE
|
|
||||||
from .gametrailers import GametrailersIE
|
|
||||||
from .gazeta import GazetaIE
|
|
||||||
from .gdcvault import GDCVaultIE
|
|
||||||
from .generic import GenericIE
|
|
||||||
from .gfycat import GfycatIE
|
|
||||||
from .giantbomb import GiantBombIE
|
|
||||||
from .giga import GigaIE
|
|
||||||
from .glide import GlideIE
|
|
||||||
from .globo import GloboIE
|
|
||||||
from .godtube import GodTubeIE
|
|
||||||
from .goldenmoustache import GoldenMoustacheIE
|
|
||||||
from .golem import GolemIE
|
|
||||||
from .googleplus import GooglePlusIE
|
|
||||||
from .googlesearch import GoogleSearchIE
|
|
||||||
from .gorillavid import GorillaVidIE
|
|
||||||
from .goshgay import GoshgayIE
|
|
||||||
from .groupon import GrouponIE
|
|
||||||
from .hark import HarkIE
|
|
||||||
from .hearthisat import HearThisAtIE
|
|
||||||
from .heise import HeiseIE
|
|
||||||
from .hellporno import HellPornoIE
|
|
||||||
from .helsinki import HelsinkiIE
|
|
||||||
from .hentaistigma import HentaiStigmaIE
|
|
||||||
from .historicfilms import HistoricFilmsIE
|
|
||||||
from .history import HistoryIE
|
|
||||||
from .hitbox import HitboxIE, HitboxLiveIE
|
|
||||||
from .hornbunny import HornBunnyIE
|
|
||||||
from .hostingbulk import HostingBulkIE
|
|
||||||
from .hotnewhiphop import HotNewHipHopIE
|
|
||||||
from .howcast import HowcastIE
|
|
||||||
from .howstuffworks import HowStuffWorksIE
|
|
||||||
from .huffpost import HuffPostIE
|
|
||||||
from .hypem import HypemIE
|
|
||||||
from .iconosquare import IconosquareIE
|
|
||||||
from .ign import IGNIE, OneUPIE
|
|
||||||
from .imdb import (
|
|
||||||
ImdbIE,
|
|
||||||
ImdbListIE
|
|
||||||
)
|
|
||||||
from .imgur import ImgurIE
|
|
||||||
from .ina import InaIE
|
|
||||||
from .infoq import InfoQIE
|
|
||||||
from .instagram import InstagramIE, InstagramUserIE
|
|
||||||
from .internetvideoarchive import InternetVideoArchiveIE
|
|
||||||
from .iprima import IPrimaIE
|
|
||||||
from .iqiyi import IqiyiIE
|
|
||||||
from .ivi import (
|
|
||||||
IviIE,
|
|
||||||
IviCompilationIE
|
|
||||||
)
|
|
||||||
from .izlesene import IzleseneIE
|
|
||||||
from .jadorecettepub import JadoreCettePubIE
|
|
||||||
from .jeuxvideo import JeuxVideoIE
|
|
||||||
from .jove import JoveIE
|
|
||||||
from .jukebox import JukeboxIE
|
|
||||||
from .jpopsukitv import JpopsukiIE
|
|
||||||
from .kaltura import KalturaIE
|
|
||||||
from .kanalplay import KanalPlayIE
|
|
||||||
from .kankan import KankanIE
|
|
||||||
from .karaoketv import KaraoketvIE
|
|
||||||
from .karrierevideos import KarriereVideosIE
|
|
||||||
from .keezmovies import KeezMoviesIE
|
|
||||||
from .khanacademy import KhanAcademyIE
|
|
||||||
from .kickstarter import KickStarterIE
|
|
||||||
from .keek import KeekIE
|
|
||||||
from .kontrtube import KontrTubeIE
|
|
||||||
from .krasview import KrasViewIE
|
|
||||||
from .ku6 import Ku6IE
|
|
||||||
from .la7 import LA7IE
|
|
||||||
from .laola1tv import Laola1TvIE
|
|
||||||
from .letv import (
|
|
||||||
LetvIE,
|
|
||||||
LetvTvIE,
|
|
||||||
LetvPlaylistIE
|
|
||||||
)
|
|
||||||
from .libsyn import LibsynIE
|
|
||||||
from .lifenews import (
|
|
||||||
LifeNewsIE,
|
|
||||||
LifeEmbedIE,
|
|
||||||
)
|
|
||||||
from .liveleak import LiveLeakIE
|
|
||||||
from .livestream import (
|
|
||||||
LivestreamIE,
|
|
||||||
LivestreamOriginalIE,
|
|
||||||
LivestreamShortenerIE,
|
|
||||||
)
|
|
||||||
from .lnkgo import LnkGoIE
|
|
||||||
from .lrt import LRTIE
|
|
||||||
from .lynda import (
|
|
||||||
LyndaIE,
|
|
||||||
LyndaCourseIE
|
|
||||||
)
|
|
||||||
from .m6 import M6IE
|
|
||||||
from .macgamestore import MacGameStoreIE
|
|
||||||
from .mailru import MailRuIE
|
|
||||||
from .malemotion import MalemotionIE
|
|
||||||
from .mdr import MDRIE
|
|
||||||
from .megavideoz import MegaVideozIE
|
|
||||||
from .metacafe import MetacafeIE
|
|
||||||
from .metacritic import MetacriticIE
|
|
||||||
from .mgoon import MgoonIE
|
|
||||||
from .minhateca import MinhatecaIE
|
|
||||||
from .ministrygrid import MinistryGridIE
|
|
||||||
from .miomio import MioMioIE
|
|
||||||
from .mit import TechTVMITIE, MITIE, OCWMITIE
|
|
||||||
from .mitele import MiTeleIE
|
|
||||||
from .mixcloud import MixcloudIE
|
|
||||||
from .mlb import MLBIE
|
|
||||||
from .mpora import MporaIE
|
|
||||||
from .moevideo import MoeVideoIE
|
|
||||||
from .mofosex import MofosexIE
|
|
||||||
from .mojvideo import MojvideoIE
|
|
||||||
from .moniker import MonikerIE
|
|
||||||
from .mooshare import MooshareIE
|
|
||||||
from .morningstar import MorningstarIE
|
|
||||||
from .motherless import MotherlessIE
|
|
||||||
from .motorsport import MotorsportIE
|
|
||||||
from .movieclips import MovieClipsIE
|
|
||||||
from .moviezine import MoviezineIE
|
|
||||||
from .movshare import MovShareIE
|
|
||||||
from .mtv import (
|
|
||||||
MTVIE,
|
|
||||||
MTVServicesEmbeddedIE,
|
|
||||||
MTVIggyIE,
|
|
||||||
)
|
|
||||||
from .muenchentv import MuenchenTVIE
|
|
||||||
from .musicplayon import MusicPlayOnIE
|
|
||||||
from .musicvault import MusicVaultIE
|
|
||||||
from .muzu import MuzuTVIE
|
|
||||||
from .myspace import MySpaceIE, MySpaceAlbumIE
|
|
||||||
from .myspass import MySpassIE
|
|
||||||
from .myvideo import MyVideoIE
|
|
||||||
from .myvidster import MyVidsterIE
|
|
||||||
from .nationalgeographic import NationalGeographicIE
|
|
||||||
from .naver import NaverIE
|
|
||||||
from .nba import NBAIE
|
|
||||||
from .nbc import (
|
|
||||||
NBCIE,
|
|
||||||
NBCNewsIE,
|
|
||||||
NBCSportsIE,
|
|
||||||
NBCSportsVPlayerIE,
|
|
||||||
)
|
|
||||||
from .ndr import (
|
|
||||||
NDRIE,
|
|
||||||
NJoyIE,
|
|
||||||
)
|
|
||||||
from .ndtv import NDTVIE
|
|
||||||
from .netzkino import NetzkinoIE
|
|
||||||
from .nerdcubed import NerdCubedFeedIE
|
|
||||||
from .nerdist import NerdistIE
|
|
||||||
from .newgrounds import NewgroundsIE
|
|
||||||
from .newstube import NewstubeIE
|
|
||||||
from .nextmedia import (
|
|
||||||
NextMediaIE,
|
|
||||||
NextMediaActionNewsIE,
|
|
||||||
AppleDailyIE,
|
|
||||||
)
|
|
||||||
from .nfb import NFBIE
|
|
||||||
from .nfl import NFLIE
|
|
||||||
from .nhl import (
|
|
||||||
NHLIE,
|
|
||||||
NHLNewsIE,
|
|
||||||
NHLVideocenterIE,
|
|
||||||
)
|
|
||||||
from .niconico import NiconicoIE, NiconicoPlaylistIE
|
|
||||||
from .ninegag import NineGagIE
|
|
||||||
from .noco import NocoIE
|
|
||||||
from .normalboots import NormalbootsIE
|
|
||||||
from .nosvideo import NosVideoIE
|
|
||||||
from .nova import NovaIE
|
|
||||||
from .novamov import NovaMovIE
|
|
||||||
from .nowness import NownessIE
|
|
||||||
from .nowtv import NowTVIE
|
|
||||||
from .nowvideo import NowVideoIE
|
|
||||||
from .npo import (
|
|
||||||
NPOIE,
|
|
||||||
NPOLiveIE,
|
|
||||||
NPORadioIE,
|
|
||||||
NPORadioFragmentIE,
|
|
||||||
TegenlichtVproIE,
|
|
||||||
)
|
|
||||||
from .nrk import (
|
|
||||||
NRKIE,
|
|
||||||
NRKPlaylistIE,
|
|
||||||
NRKTVIE,
|
|
||||||
)
|
|
||||||
from .ntvde import NTVDeIE
|
|
||||||
from .ntvru import NTVRuIE
|
|
||||||
from .nytimes import (
|
|
||||||
NYTimesIE,
|
|
||||||
NYTimesArticleIE,
|
|
||||||
)
|
|
||||||
from .nuvid import NuvidIE
|
|
||||||
from .odnoklassniki import OdnoklassnikiIE
|
|
||||||
from .oktoberfesttv import OktoberfestTVIE
|
|
||||||
from .onionstudios import OnionStudiosIE
|
|
||||||
from .ooyala import (
|
|
||||||
OoyalaIE,
|
|
||||||
OoyalaExternalIE,
|
|
||||||
)
|
|
||||||
from .openfilm import OpenFilmIE
|
|
||||||
from .orf import (
|
|
||||||
ORFTVthekIE,
|
|
||||||
ORFOE1IE,
|
|
||||||
ORFFM4IE,
|
|
||||||
ORFIPTVIE,
|
|
||||||
)
|
|
||||||
from .parliamentliveuk import ParliamentLiveUKIE
|
|
||||||
from .patreon import PatreonIE
|
|
||||||
from .pbs import PBSIE
|
|
||||||
from .philharmoniedeparis import PhilharmonieDeParisIE
|
|
||||||
from .phoenix import PhoenixIE
|
|
||||||
from .photobucket import PhotobucketIE
|
|
||||||
from .pinkbike import PinkbikeIE
|
|
||||||
from .planetaplay import PlanetaPlayIE
|
|
||||||
from .pladform import PladformIE
|
|
||||||
from .played import PlayedIE
|
|
||||||
from .playfm import PlayFMIE
|
|
||||||
from .playvid import PlayvidIE
|
|
||||||
from .playwire import PlaywireIE
|
|
||||||
from .podomatic import PodomaticIE
|
|
||||||
from .porn91 import Porn91IE
|
|
||||||
from .pornhd import PornHdIE
|
|
||||||
from .pornhub import (
|
|
||||||
PornHubIE,
|
|
||||||
PornHubPlaylistIE,
|
|
||||||
)
|
|
||||||
from .pornotube import PornotubeIE
|
|
||||||
from .pornovoisines import PornoVoisinesIE
|
|
||||||
from .pornoxo import PornoXOIE
|
|
||||||
from .primesharetv import PrimeShareTVIE
|
|
||||||
from .promptfile import PromptFileIE
|
|
||||||
from .prosiebensat1 import ProSiebenSat1IE
|
|
||||||
from .puls4 import Puls4IE
|
|
||||||
from .pyvideo import PyvideoIE
|
|
||||||
from .qqmusic import (
|
|
||||||
QQMusicIE,
|
|
||||||
QQMusicSingerIE,
|
|
||||||
QQMusicAlbumIE,
|
|
||||||
QQMusicToplistIE,
|
|
||||||
)
|
|
||||||
from .quickvid import QuickVidIE
|
|
||||||
from .r7 import R7IE
|
|
||||||
from .radiode import RadioDeIE
|
|
||||||
from .radiojavan import RadioJavanIE
|
|
||||||
from .radiobremen import RadioBremenIE
|
|
||||||
from .radiofrance import RadioFranceIE
|
|
||||||
from .rai import RaiIE
|
|
||||||
from .rbmaradio import RBMARadioIE
|
|
||||||
from .redtube import RedTubeIE
|
|
||||||
from .restudy import RestudyIE
|
|
||||||
from .reverbnation import ReverbNationIE
|
|
||||||
from .ringtv import RingTVIE
|
|
||||||
from .ro220 import Ro220IE
|
|
||||||
from .rottentomatoes import RottenTomatoesIE
|
|
||||||
from .roxwel import RoxwelIE
|
|
||||||
from .rtbf import RTBFIE
|
|
||||||
from .rte import RteIE
|
|
||||||
from .rtlnl import RtlNlIE
|
|
||||||
from .rtl2 import RTL2IE
|
|
||||||
from .rtp import RTPIE
|
|
||||||
from .rts import RTSIE
|
|
||||||
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
|
|
||||||
from .ruhd import RUHDIE
|
|
||||||
from .rutube import (
|
|
||||||
RutubeIE,
|
|
||||||
RutubeChannelIE,
|
|
||||||
RutubeEmbedIE,
|
|
||||||
RutubeMovieIE,
|
|
||||||
RutubePersonIE,
|
|
||||||
)
|
|
||||||
from .rutv import RUTVIE
|
|
||||||
from .ruutu import RuutuIE
|
|
||||||
from .sandia import SandiaIE
|
|
||||||
from .safari import (
|
|
||||||
SafariIE,
|
|
||||||
SafariCourseIE,
|
|
||||||
)
|
|
||||||
from .sapo import SapoIE
|
|
||||||
from .savefrom import SaveFromIE
|
|
||||||
from .sbs import SBSIE
|
|
||||||
from .scivee import SciVeeIE
|
|
||||||
from .screencast import ScreencastIE
|
|
||||||
from .screencastomatic import ScreencastOMaticIE
|
|
||||||
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
|
|
||||||
from .senateisvp import SenateISVPIE
|
|
||||||
from .servingsys import ServingSysIE
|
|
||||||
from .sexu import SexuIE
|
|
||||||
from .sexykarma import SexyKarmaIE
|
|
||||||
from .shared import SharedIE
|
|
||||||
from .sharesix import ShareSixIE
|
|
||||||
from .sina import SinaIE
|
|
||||||
from .slideshare import SlideshareIE
|
|
||||||
from .slutload import SlutloadIE
|
|
||||||
from .smotri import (
|
|
||||||
SmotriIE,
|
|
||||||
SmotriCommunityIE,
|
|
||||||
SmotriUserIE,
|
|
||||||
SmotriBroadcastIE,
|
|
||||||
)
|
|
||||||
from .snotr import SnotrIE
|
|
||||||
from .sohu import SohuIE
|
|
||||||
from .soompi import (
|
|
||||||
SoompiIE,
|
|
||||||
SoompiShowIE,
|
|
||||||
)
|
|
||||||
from .soundcloud import (
|
|
||||||
SoundcloudIE,
|
|
||||||
SoundcloudSetIE,
|
|
||||||
SoundcloudUserIE,
|
|
||||||
SoundcloudPlaylistIE
|
|
||||||
)
|
|
||||||
from .soundgasm import (
|
|
||||||
SoundgasmIE,
|
|
||||||
SoundgasmProfileIE
|
|
||||||
)
|
|
||||||
from .southpark import (
|
|
||||||
SouthParkIE,
|
|
||||||
SouthParkDeIE,
|
|
||||||
SouthParkDkIE,
|
|
||||||
SouthParkEsIE,
|
|
||||||
SouthParkNlIE
|
|
||||||
)
|
|
||||||
from .space import SpaceIE
|
|
||||||
from .spankbang import SpankBangIE
|
|
||||||
from .spankwire import SpankwireIE
|
|
||||||
from .spiegel import SpiegelIE, SpiegelArticleIE
|
|
||||||
from .spiegeltv import SpiegeltvIE
|
|
||||||
from .spike import SpikeIE
|
|
||||||
from .sport5 import Sport5IE
|
|
||||||
from .sportbox import (
|
|
||||||
SportBoxIE,
|
|
||||||
SportBoxEmbedIE,
|
|
||||||
)
|
|
||||||
from .sportdeutschland import SportDeutschlandIE
|
|
||||||
from .srf import SrfIE
|
|
||||||
from .srmediathek import SRMediathekIE
|
|
||||||
from .ssa import SSAIE
|
|
||||||
from .stanfordoc import StanfordOpenClassroomIE
|
|
||||||
from .steam import SteamIE
|
|
||||||
from .streamcloud import StreamcloudIE
|
|
||||||
from .streamcz import StreamCZIE
|
|
||||||
from .streetvoice import StreetVoiceIE
|
|
||||||
from .sunporno import SunPornoIE
|
|
||||||
from .svt import (
|
|
||||||
SVTIE,
|
|
||||||
SVTPlayIE,
|
|
||||||
)
|
|
||||||
from .swrmediathek import SWRMediathekIE
|
|
||||||
from .syfy import SyfyIE
|
|
||||||
from .sztvhu import SztvHuIE
|
|
||||||
from .tagesschau import TagesschauIE
|
|
||||||
from .tapely import TapelyIE
|
|
||||||
from .tass import TassIE
|
|
||||||
from .teachertube import (
|
|
||||||
TeacherTubeIE,
|
|
||||||
TeacherTubeUserIE,
|
|
||||||
)
|
|
||||||
from .teachingchannel import TeachingChannelIE
|
|
||||||
from .teamcoco import TeamcocoIE
|
|
||||||
from .techtalks import TechTalksIE
|
|
||||||
from .ted import TEDIE
|
|
||||||
from .telebruxelles import TeleBruxellesIE
|
|
||||||
from .telecinco import TelecincoIE
|
|
||||||
from .telemb import TeleMBIE
|
|
||||||
from .teletask import TeleTaskIE
|
|
||||||
from .tenplay import TenPlayIE
|
|
||||||
from .testurl import TestURLIE
|
|
||||||
from .testtube import TestTubeIE
|
|
||||||
from .tf1 import TF1IE
|
|
||||||
from .theonion import TheOnionIE
|
|
||||||
from .theplatform import ThePlatformIE
|
|
||||||
from .thesixtyone import TheSixtyOneIE
|
|
||||||
from .thisav import ThisAVIE
|
|
||||||
from .tinypic import TinyPicIE
|
|
||||||
from .tlc import TlcIE, TlcDeIE
|
|
||||||
from .tmz import (
|
|
||||||
TMZIE,
|
|
||||||
TMZArticleIE,
|
|
||||||
)
|
|
||||||
from .tnaflix import TNAFlixIE
|
|
||||||
from .thvideo import (
|
|
||||||
THVideoIE,
|
|
||||||
THVideoPlaylistIE
|
|
||||||
)
|
|
||||||
from .toutv import TouTvIE
|
|
||||||
from .toypics import ToypicsUserIE, ToypicsIE
|
|
||||||
from .traileraddict import TrailerAddictIE
|
|
||||||
from .trilulilu import TriluliluIE
|
|
||||||
from .trutube import TruTubeIE
|
|
||||||
from .tube8 import Tube8IE
|
|
||||||
from .tubitv import TubiTvIE
|
|
||||||
from .tudou import TudouIE
|
|
||||||
from .tumblr import TumblrIE
|
|
||||||
from .tunein import TuneInIE
|
|
||||||
from .turbo import TurboIE
|
|
||||||
from .tutv import TutvIE
|
|
||||||
from .tv2 import (
|
|
||||||
TV2IE,
|
|
||||||
TV2ArticleIE,
|
|
||||||
)
|
|
||||||
from .tv4 import TV4IE
|
|
||||||
from .tvc import (
|
|
||||||
TVCIE,
|
|
||||||
TVCArticleIE,
|
|
||||||
)
|
|
||||||
from .tvigle import TvigleIE
|
|
||||||
from .tvp import TvpIE, TvpSeriesIE
|
|
||||||
from .tvplay import TVPlayIE
|
|
||||||
from .tweakers import TweakersIE
|
|
||||||
from .twentyfourvideo import TwentyFourVideoIE
|
|
||||||
from .twentytwotracks import (
|
|
||||||
TwentyTwoTracksIE,
|
|
||||||
TwentyTwoTracksGenreIE
|
|
||||||
)
|
|
||||||
from .twitch import (
|
|
||||||
TwitchVideoIE,
|
|
||||||
TwitchChapterIE,
|
|
||||||
TwitchVodIE,
|
|
||||||
TwitchProfileIE,
|
|
||||||
TwitchPastBroadcastsIE,
|
|
||||||
TwitchBookmarksIE,
|
|
||||||
TwitchStreamIE,
|
|
||||||
)
|
|
||||||
from .ubu import UbuIE
|
|
||||||
from .udemy import (
|
|
||||||
UdemyIE,
|
|
||||||
UdemyCourseIE
|
|
||||||
)
|
|
||||||
from .udn import UDNEmbedIE
|
|
||||||
from .ultimedia import UltimediaIE
|
|
||||||
from .unistra import UnistraIE
|
|
||||||
from .urort import UrortIE
|
|
||||||
from .ustream import UstreamIE, UstreamChannelIE
|
|
||||||
from .varzesh3 import Varzesh3IE
|
|
||||||
from .vbox7 import Vbox7IE
|
|
||||||
from .veehd import VeeHDIE
|
|
||||||
from .veoh import VeohIE
|
|
||||||
from .vessel import VesselIE
|
|
||||||
from .vesti import VestiIE
|
|
||||||
from .vevo import VevoIE
|
|
||||||
from .vgtv import (
|
|
||||||
BTArticleIE,
|
|
||||||
BTVestlendingenIE,
|
|
||||||
VGTVIE,
|
|
||||||
)
|
|
||||||
from .vh1 import VH1IE
|
|
||||||
from .vice import ViceIE
|
|
||||||
from .viddler import ViddlerIE
|
|
||||||
from .videobam import VideoBamIE
|
|
||||||
from .videodetective import VideoDetectiveIE
|
|
||||||
from .videolecturesnet import VideoLecturesNetIE
|
|
||||||
from .videofyme import VideofyMeIE
|
|
||||||
from .videomega import VideoMegaIE
|
|
||||||
from .videopremium import VideoPremiumIE
|
|
||||||
from .videott import VideoTtIE
|
|
||||||
from .videoweed import VideoWeedIE
|
|
||||||
from .vidme import VidmeIE
|
|
||||||
from .vidzi import VidziIE
|
|
||||||
from .vier import VierIE, VierVideosIE
|
|
||||||
from .viewster import ViewsterIE
|
|
||||||
from .vimeo import (
|
|
||||||
VimeoIE,
|
|
||||||
VimeoAlbumIE,
|
|
||||||
VimeoChannelIE,
|
|
||||||
VimeoGroupsIE,
|
|
||||||
VimeoLikesIE,
|
|
||||||
VimeoReviewIE,
|
|
||||||
VimeoUserIE,
|
|
||||||
VimeoWatchLaterIE,
|
|
||||||
)
|
|
||||||
from .vimple import VimpleIE
|
|
||||||
from .vine import (
|
|
||||||
VineIE,
|
|
||||||
VineUserIE,
|
|
||||||
)
|
|
||||||
from .viki import (
|
|
||||||
VikiIE,
|
|
||||||
VikiChannelIE,
|
|
||||||
)
|
|
||||||
from .vk import (
|
|
||||||
VKIE,
|
|
||||||
VKUserVideosIE,
|
|
||||||
)
|
|
||||||
from .vodlocker import VodlockerIE
|
|
||||||
from .voicerepublic import VoiceRepublicIE
|
|
||||||
from .vporn import VpornIE
|
|
||||||
from .vrt import VRTIE
|
|
||||||
from .vube import VubeIE
|
|
||||||
from .vuclip import VuClipIE
|
|
||||||
from .vulture import VultureIE
|
|
||||||
from .walla import WallaIE
|
|
||||||
from .washingtonpost import WashingtonPostIE
|
|
||||||
from .wat import WatIE
|
|
||||||
from .wayofthemaster import WayOfTheMasterIE
|
|
||||||
from .wdr import (
|
|
||||||
WDRIE,
|
|
||||||
WDRMobileIE,
|
|
||||||
WDRMausIE,
|
|
||||||
)
|
|
||||||
from .webofstories import WebOfStoriesIE
|
|
||||||
from .weibo import WeiboIE
|
|
||||||
from .wimp import WimpIE
|
|
||||||
from .wistia import WistiaIE
|
|
||||||
from .worldstarhiphop import WorldStarHipHopIE
|
|
||||||
from .wrzuta import WrzutaIE
|
|
||||||
from .wsj import WSJIE
|
|
||||||
from .xbef import XBefIE
|
|
||||||
from .xboxclips import XboxClipsIE
|
|
||||||
from .xhamster import (
|
|
||||||
XHamsterIE,
|
|
||||||
XHamsterEmbedIE,
|
|
||||||
)
|
|
||||||
from .xminus import XMinusIE
|
|
||||||
from .xnxx import XNXXIE
|
|
||||||
from .xstream import XstreamIE
|
|
||||||
from .xtube import XTubeUserIE, XTubeIE
|
|
||||||
from .xuite import XuiteIE
|
|
||||||
from .xvideos import XVideosIE
|
|
||||||
from .xxxymovies import XXXYMoviesIE
|
|
||||||
from .yahoo import (
|
|
||||||
YahooIE,
|
|
||||||
YahooSearchIE,
|
|
||||||
)
|
|
||||||
from .yam import YamIE
|
|
||||||
from .yandexmusic import (
|
|
||||||
YandexMusicTrackIE,
|
|
||||||
YandexMusicAlbumIE,
|
|
||||||
YandexMusicPlaylistIE,
|
|
||||||
)
|
|
||||||
from .yesjapan import YesJapanIE
|
|
||||||
from .ynet import YnetIE
|
|
||||||
from .youjizz import YouJizzIE
|
|
||||||
from .youku import YoukuIE
|
|
||||||
from .youporn import YouPornIE
|
|
||||||
from .yourupload import YourUploadIE
|
|
||||||
from .youtube import (
|
|
||||||
YoutubeIE,
|
|
||||||
YoutubeChannelIE,
|
|
||||||
YoutubeFavouritesIE,
|
|
||||||
YoutubeHistoryIE,
|
|
||||||
YoutubePlaylistIE,
|
|
||||||
YoutubeRecommendedIE,
|
|
||||||
YoutubeSearchDateIE,
|
|
||||||
YoutubeSearchIE,
|
|
||||||
YoutubeSearchURLIE,
|
|
||||||
YoutubeShowIE,
|
|
||||||
YoutubeSubscriptionsIE,
|
|
||||||
YoutubeTruncatedIDIE,
|
|
||||||
YoutubeTruncatedURLIE,
|
|
||||||
YoutubeUserIE,
|
|
||||||
YoutubeWatchLaterIE,
|
|
||||||
)
|
|
||||||
from .zapiks import ZapiksIE
|
|
||||||
from .zdf import ZDFIE, ZDFChannelIE
|
|
||||||
from .zingmp3 import (
|
|
||||||
ZingMp3SongIE,
|
|
||||||
ZingMp3AlbumIE,
|
|
||||||
)
|
|
||||||
|
|
||||||
_ALL_CLASSES = [
|
_ALL_CLASSES = [
|
||||||
klass
|
klass
|
||||||
for name, klass in globals().items()
|
for name, klass in globals().items()
|
||||||
if name.endswith('IE') and name != 'GenericIE'
|
if name.endswith('IE') and name != 'GenericIE'
|
||||||
]
|
]
|
||||||
_ALL_CLASSES.append(GenericIE)
|
_ALL_CLASSES.append(GenericIE)
|
||||||
|
|
||||||
|
|
||||||
|
def gen_extractor_classes():
|
||||||
|
""" Return a list of supported extractors.
|
||||||
|
The order does matter; the first extractor matched is the one handling the URL.
|
||||||
|
"""
|
||||||
|
return _ALL_CLASSES
|
||||||
|
|
||||||
|
|
||||||
def gen_extractors():
|
def gen_extractors():
|
||||||
""" Return a list of an instance of every supported extractor.
|
""" Return a list of an instance of every supported extractor.
|
||||||
The order does matter; the first extractor matched is the one handling the URL.
|
The order does matter; the first extractor matched is the one handling the URL.
|
||||||
"""
|
"""
|
||||||
return [klass() for klass in _ALL_CLASSES]
|
return [klass() for klass in gen_extractor_classes()]
|
||||||
|
|
||||||
|
|
||||||
def list_extractors(age_limit):
|
def list_extractors(age_limit):
|
||||||
|
|||||||
@@ -1,16 +1,20 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
import json
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
js_to_json,
|
||||||
|
int_or_none,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class ABCIE(InfoExtractor):
|
class ABCIE(InfoExtractor):
|
||||||
IE_NAME = 'abc.net.au'
|
IE_NAME = 'abc.net.au'
|
||||||
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
|
_VALID_URL = r'https?://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
|
||||||
|
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
|
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
|
||||||
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
|
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -19,23 +23,67 @@ class ABCIE(InfoExtractor):
|
|||||||
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
|
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
|
||||||
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
|
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
|
||||||
},
|
},
|
||||||
}
|
'skip': 'this video has expired',
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
|
||||||
|
'md5': 'db2a5369238b51f9811ad815b69dc086',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'NvqvPeNZsHU',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'upload_date': '20150816',
|
||||||
|
'uploader': 'ABC News (Australia)',
|
||||||
|
'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
|
||||||
|
'uploader_id': 'NewsOnABC',
|
||||||
|
'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
|
||||||
|
},
|
||||||
|
'add_ie': ['Youtube'],
|
||||||
|
'skip': 'Not accessible from Travis CI server',
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
|
||||||
|
'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '6880080',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'title': 'NAB lifts interest rates, following Westpac and CBA',
|
||||||
|
'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
urls_info_json = self._search_regex(
|
mobj = re.search(
|
||||||
r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls',
|
r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
|
||||||
flags=re.DOTALL)
|
webpage)
|
||||||
urls_info = json.loads(urls_info_json.replace('\'', '"'))
|
if mobj is None:
|
||||||
|
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
|
||||||
|
if expired:
|
||||||
|
raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
|
||||||
|
raise ExtractorError('Unable to extract video urls')
|
||||||
|
|
||||||
|
urls_info = self._parse_json(
|
||||||
|
mobj.group('json_data'), video_id, transform_source=js_to_json)
|
||||||
|
|
||||||
|
if not isinstance(urls_info, list):
|
||||||
|
urls_info = [urls_info]
|
||||||
|
|
||||||
|
if mobj.group('type') == 'YouTube':
|
||||||
|
return self.playlist_result([
|
||||||
|
self.url_result(url_info['url']) for url_info in urls_info])
|
||||||
|
|
||||||
formats = [{
|
formats = [{
|
||||||
'url': url_info['url'],
|
'url': url_info['url'],
|
||||||
'width': int(url_info['width']),
|
'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
|
||||||
'height': int(url_info['height']),
|
'width': int_or_none(url_info.get('width')),
|
||||||
'tbr': int(url_info['bitrate']),
|
'height': int_or_none(url_info.get('height')),
|
||||||
'filesize': int(url_info['filesize']),
|
'tbr': int_or_none(url_info.get('bitrate')),
|
||||||
|
'filesize': int_or_none(url_info.get('filesize')),
|
||||||
} for url_info in urls_info]
|
} for url_info in urls_info]
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
|||||||
135
youtube_dl/extractor/abcnews.py
Normal file
135
youtube_dl/extractor/abcnews.py
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import calendar
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
|
||||||
|
from .amp import AMPIE
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_urlparse
|
||||||
|
|
||||||
|
|
||||||
|
class AbcNewsVideoIE(AMPIE):
|
||||||
|
IE_NAME = 'abcnews:video'
|
||||||
|
_VALID_URL = 'http://abcnews.go.com/[^/]+/video/(?P<display_id>[0-9a-z-]+)-(?P<id>\d+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://abcnews.go.com/ThisWeek/video/week-exclusive-irans-foreign-minister-zarif-20411932',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '20411932',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'display_id': 'week-exclusive-irans-foreign-minister-zarif',
|
||||||
|
'title': '\'This Week\' Exclusive: Iran\'s Foreign Minister Zarif',
|
||||||
|
'description': 'George Stephanopoulos goes one-on-one with Iranian Foreign Minister Dr. Javad Zarif.',
|
||||||
|
'duration': 180,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
display_id = mobj.group('display_id')
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
info_dict = self._extract_feed_info(
|
||||||
|
'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
|
||||||
|
info_dict.update({
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
})
|
||||||
|
return info_dict
|
||||||
|
|
||||||
|
|
||||||
|
class AbcNewsIE(InfoExtractor):
|
||||||
|
IE_NAME = 'abcnews'
|
||||||
|
_VALID_URL = 'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://abcnews.go.com/Blotter/News/dramatic-video-rare-death-job-america/story?id=10498713#.UIhwosWHLjY',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '10498713',
|
||||||
|
'ext': 'flv',
|
||||||
|
'display_id': 'dramatic-video-rare-death-job-america',
|
||||||
|
'title': 'Occupational Hazards',
|
||||||
|
'description': 'Nightline investigates the dangers that lurk at various jobs.',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'upload_date': '20100428',
|
||||||
|
'timestamp': 1272412800,
|
||||||
|
},
|
||||||
|
'add_ie': ['AbcNewsVideo'],
|
||||||
|
}, {
|
||||||
|
'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '39125818',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'display_id': 'justin-timberlake-performs-stop-feeling-eurovision-2016',
|
||||||
|
'title': 'Justin Timberlake Drops Hints For Secret Single',
|
||||||
|
'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.',
|
||||||
|
'upload_date': '20160515',
|
||||||
|
'timestamp': 1463329500,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
# The embedded YouTube video is blocked due to copyright issues
|
||||||
|
'playlist_items': '1',
|
||||||
|
},
|
||||||
|
'add_ie': ['AbcNewsVideo'],
|
||||||
|
}, {
|
||||||
|
'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
display_id = mobj.group('display_id')
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
video_url = self._search_regex(
|
||||||
|
r'window\.abcnvideo\.url\s*=\s*"([^"]+)"', webpage, 'video URL')
|
||||||
|
full_video_url = compat_urlparse.urljoin(url, video_url)
|
||||||
|
|
||||||
|
youtube_url = self._html_search_regex(
|
||||||
|
r'<iframe[^>]+src="(https://www\.youtube\.com/embed/[^"]+)"',
|
||||||
|
webpage, 'YouTube URL', default=None)
|
||||||
|
|
||||||
|
timestamp = None
|
||||||
|
date_str = self._html_search_regex(
|
||||||
|
r'<span[^>]+class="timestamp">([^<]+)</span>',
|
||||||
|
webpage, 'timestamp', fatal=False)
|
||||||
|
if date_str:
|
||||||
|
tz_offset = 0
|
||||||
|
if date_str.endswith(' ET'): # Eastern Time
|
||||||
|
tz_offset = -5
|
||||||
|
date_str = date_str[:-3]
|
||||||
|
date_formats = ['%b. %d, %Y', '%b %d, %Y, %I:%M %p']
|
||||||
|
for date_format in date_formats:
|
||||||
|
try:
|
||||||
|
timestamp = calendar.timegm(time.strptime(date_str.strip(), date_format))
|
||||||
|
except ValueError:
|
||||||
|
continue
|
||||||
|
if timestamp is not None:
|
||||||
|
timestamp -= tz_offset * 3600
|
||||||
|
|
||||||
|
entry = {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'ie_key': AbcNewsVideoIE.ie_key(),
|
||||||
|
'url': full_video_url,
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
}
|
||||||
|
|
||||||
|
if youtube_url:
|
||||||
|
entries = [entry, self.url_result(youtube_url, 'Youtube')]
|
||||||
|
return self.playlist_result(entries)
|
||||||
|
|
||||||
|
return entry
|
||||||
@@ -15,7 +15,7 @@ class AcademicEarthCourseIE(InfoExtractor):
|
|||||||
'title': 'Laws of Nature',
|
'title': 'Laws of Nature',
|
||||||
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
|
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
|
||||||
},
|
},
|
||||||
'playlist_count': 4,
|
'playlist_count': 3,
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|||||||
82
youtube_dl/extractor/acast.py
Normal file
82
youtube_dl/extractor/acast.py
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
import functools
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_str
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
OnDemandPagedList,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class ACastIE(InfoExtractor):
|
||||||
|
IE_NAME = 'acast'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<channel>[^/]+)/(?P<id>[^/#?]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://www.acast.com/condenasttraveler/-where-are-you-taipei-101-taiwan',
|
||||||
|
'md5': 'ada3de5a1e3a2a381327d749854788bb',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '57de3baa-4bb0-487e-9418-2692c1277a34',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'title': '"Where Are You?": Taipei 101, Taiwan',
|
||||||
|
'timestamp': 1196172000000,
|
||||||
|
'description': 'md5:a0b4ef3634e63866b542e5b1199a1a0e',
|
||||||
|
'duration': 211,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
channel, display_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
cast_data = self._download_json(
|
||||||
|
'https://embed.acast.com/api/acasts/%s/%s' % (channel, display_id), display_id)
|
||||||
|
return {
|
||||||
|
'id': compat_str(cast_data['id']),
|
||||||
|
'display_id': display_id,
|
||||||
|
'url': cast_data['blings'][0]['audio'],
|
||||||
|
'title': cast_data['name'],
|
||||||
|
'description': cast_data.get('description'),
|
||||||
|
'thumbnail': cast_data.get('image'),
|
||||||
|
'timestamp': int_or_none(cast_data.get('publishingDate')),
|
||||||
|
'duration': int_or_none(cast_data.get('duration')),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class ACastChannelIE(InfoExtractor):
|
||||||
|
IE_NAME = 'acast:channel'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<id>[^/#?]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://www.acast.com/condenasttraveler',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '50544219-29bb-499e-a083-6087f4cb7797',
|
||||||
|
'title': 'Condé Nast Traveler Podcast',
|
||||||
|
'description': 'md5:98646dee22a5b386626ae31866638fbd',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 20,
|
||||||
|
}
|
||||||
|
_API_BASE_URL = 'https://www.acast.com/api/'
|
||||||
|
_PAGE_SIZE = 10
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def suitable(cls, url):
|
||||||
|
return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
|
||||||
|
|
||||||
|
def _fetch_page(self, channel_slug, page):
|
||||||
|
casts = self._download_json(
|
||||||
|
self._API_BASE_URL + 'channels/%s/acasts?page=%s' % (channel_slug, page),
|
||||||
|
channel_slug, note='Download page %d of channel data' % page)
|
||||||
|
for cast in casts:
|
||||||
|
yield self.url_result(
|
||||||
|
'https://www.acast.com/%s/%s' % (channel_slug, cast['url']),
|
||||||
|
'ACast', cast['id'])
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
channel_slug = self._match_id(url)
|
||||||
|
channel_data = self._download_json(
|
||||||
|
self._API_BASE_URL + 'channels/%s' % channel_slug, channel_slug)
|
||||||
|
entries = OnDemandPagedList(functools.partial(
|
||||||
|
self._fetch_page, channel_slug), self._PAGE_SIZE)
|
||||||
|
return self.playlist_result(entries, compat_str(
|
||||||
|
channel_data['id']), channel_data['name'], channel_data.get('description'))
|
||||||
@@ -6,7 +6,7 @@ from .common import InfoExtractor
|
|||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_HTTPError,
|
compat_HTTPError,
|
||||||
compat_str,
|
compat_str,
|
||||||
compat_urllib_parse,
|
compat_urllib_parse_urlencode,
|
||||||
compat_urllib_parse_urlparse,
|
compat_urllib_parse_urlparse,
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -16,7 +16,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class AddAnimeIE(InfoExtractor):
|
class AddAnimeIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
|
_VALID_URL = r'https?://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
|
'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
|
||||||
'md5': '72954ea10bc979ab5e2eb288b21425a0',
|
'md5': '72954ea10bc979ab5e2eb288b21425a0',
|
||||||
@@ -60,7 +60,7 @@ class AddAnimeIE(InfoExtractor):
|
|||||||
confirm_url = (
|
confirm_url = (
|
||||||
parsed_url.scheme + '://' + parsed_url.netloc +
|
parsed_url.scheme + '://' + parsed_url.netloc +
|
||||||
action + '?' +
|
action + '?' +
|
||||||
compat_urllib_parse.urlencode({
|
compat_urllib_parse_urlencode({
|
||||||
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
|
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
|
||||||
self._download_webpage(
|
self._download_webpage(
|
||||||
confirm_url, video_id,
|
confirm_url, video_id,
|
||||||
|
|||||||
@@ -1,23 +1,32 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_str
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
parse_duration,
|
parse_duration,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
|
int_or_none,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
ISO639Utils,
|
ISO639Utils,
|
||||||
|
determine_ext,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVIE(InfoExtractor):
|
class AdobeTVBaseIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
|
_API_BASE_URL = 'http://tv.adobe.com/api/v4/'
|
||||||
|
|
||||||
|
|
||||||
|
class AdobeTVIE(AdobeTVBaseIE):
|
||||||
|
_VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
|
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
|
||||||
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
|
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
|
'id': '10981',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
|
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
|
||||||
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
|
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
|
||||||
@@ -29,50 +38,106 @@ class AdobeTVIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
language, show_urlname, urlname = re.match(self._VALID_URL, url).groups()
|
||||||
webpage = self._download_webpage(url, video_id)
|
if not language:
|
||||||
|
language = 'en'
|
||||||
|
|
||||||
player = self._parse_json(
|
video_data = self._download_json(
|
||||||
self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
|
self._API_BASE_URL + 'episode/get/?language=%s&show_urlname=%s&urlname=%s&disclosure=standard' % (language, show_urlname, urlname),
|
||||||
video_id)
|
urlname)['data'][0]
|
||||||
|
|
||||||
title = player.get('title') or self._search_regex(
|
|
||||||
r'data-title="([^"]+)"', webpage, 'title')
|
|
||||||
description = self._og_search_description(webpage)
|
|
||||||
thumbnail = self._og_search_thumbnail(webpage)
|
|
||||||
|
|
||||||
upload_date = unified_strdate(
|
|
||||||
self._html_search_meta('datepublished', webpage, 'upload date'))
|
|
||||||
|
|
||||||
duration = parse_duration(
|
|
||||||
self._html_search_meta('duration', webpage, 'duration') or
|
|
||||||
self._search_regex(
|
|
||||||
r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
|
|
||||||
webpage, 'duration', fatal=False))
|
|
||||||
|
|
||||||
view_count = str_to_int(self._search_regex(
|
|
||||||
r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
|
|
||||||
webpage, 'view count'))
|
|
||||||
|
|
||||||
formats = [{
|
formats = [{
|
||||||
'url': source['src'],
|
'url': source['url'],
|
||||||
'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
|
'format_id': source.get('quality_level') or source['url'].split('-')[-1].split('.')[0] or None,
|
||||||
'tbr': source.get('bitrate'),
|
'width': int_or_none(source.get('width')),
|
||||||
} for source in player['sources']]
|
'height': int_or_none(source.get('height')),
|
||||||
|
'tbr': int_or_none(source.get('video_data_rate')),
|
||||||
|
} for source in video_data['videos']]
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': compat_str(video_data['id']),
|
||||||
'title': title,
|
'title': video_data['title'],
|
||||||
'description': description,
|
'description': video_data.get('description'),
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': video_data.get('thumbnail'),
|
||||||
'upload_date': upload_date,
|
'upload_date': unified_strdate(video_data.get('start_date')),
|
||||||
'duration': duration,
|
'duration': parse_duration(video_data.get('duration')),
|
||||||
'view_count': view_count,
|
'view_count': str_to_int(video_data.get('playcount')),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class AdobeTVPlaylistBaseIE(AdobeTVBaseIE):
|
||||||
|
def _parse_page_data(self, page_data):
|
||||||
|
return [self.url_result(self._get_element_url(element_data)) for element_data in page_data]
|
||||||
|
|
||||||
|
def _extract_playlist_entries(self, url, display_id):
|
||||||
|
page = self._download_json(url, display_id)
|
||||||
|
entries = self._parse_page_data(page['data'])
|
||||||
|
for page_num in range(2, page['paging']['pages'] + 1):
|
||||||
|
entries.extend(self._parse_page_data(
|
||||||
|
self._download_json(url + '&page=%d' % page_num, display_id)['data']))
|
||||||
|
return entries
|
||||||
|
|
||||||
|
|
||||||
|
class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
|
||||||
|
_VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '36',
|
||||||
|
'title': 'The Complete Picture with Julieanne Kost',
|
||||||
|
'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 136,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _get_element_url(self, element_data):
|
||||||
|
return element_data['urls'][0]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
language, show_urlname = re.match(self._VALID_URL, url).groups()
|
||||||
|
if not language:
|
||||||
|
language = 'en'
|
||||||
|
query = 'language=%s&show_urlname=%s' % (language, show_urlname)
|
||||||
|
|
||||||
|
show_data = self._download_json(self._API_BASE_URL + 'show/get/?%s' % query, show_urlname)['data'][0]
|
||||||
|
|
||||||
|
return self.playlist_result(
|
||||||
|
self._extract_playlist_entries(self._API_BASE_URL + 'episode/?%s' % query, show_urlname),
|
||||||
|
compat_str(show_data['id']),
|
||||||
|
show_data['show_name'],
|
||||||
|
show_data['show_description'])
|
||||||
|
|
||||||
|
|
||||||
|
class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
|
||||||
|
_VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://tv.adobe.com/channel/development',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'development',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 96,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _get_element_url(self, element_data):
|
||||||
|
return element_data['url']
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups()
|
||||||
|
if not language:
|
||||||
|
language = 'en'
|
||||||
|
query = 'language=%s&channel_urlname=%s' % (language, channel_urlname)
|
||||||
|
if category_urlname:
|
||||||
|
query += '&category_urlname=%s' % category_urlname
|
||||||
|
|
||||||
|
return self.playlist_result(
|
||||||
|
self._extract_playlist_entries(self._API_BASE_URL + 'show/?%s' % query, channel_urlname),
|
||||||
|
channel_urlname)
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVVideoIE(InfoExtractor):
|
class AdobeTVVideoIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
|
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
|
||||||
|
|
||||||
@@ -91,28 +156,25 @@ class AdobeTVVideoIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
|
video_data = self._download_json(url + '?format=json', video_id)
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
|
|
||||||
player_params = self._parse_json(self._search_regex(
|
|
||||||
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
|
|
||||||
video_id)
|
|
||||||
|
|
||||||
formats = [{
|
formats = [{
|
||||||
|
'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')),
|
||||||
'url': source['src'],
|
'url': source['src'],
|
||||||
'width': source.get('width'),
|
'width': int_or_none(source.get('width')),
|
||||||
'height': source.get('height'),
|
'height': int_or_none(source.get('height')),
|
||||||
'tbr': source.get('bitrate'),
|
'tbr': int_or_none(source.get('bitrate')),
|
||||||
} for source in player_params['sources']]
|
} for source in video_data['sources']]
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
# For both metadata and downloaded files the duration varies among
|
# For both metadata and downloaded files the duration varies among
|
||||||
# formats. I just pick the max one
|
# formats. I just pick the max one
|
||||||
duration = max(filter(None, [
|
duration = max(filter(None, [
|
||||||
float_or_none(source.get('duration'), scale=1000)
|
float_or_none(source.get('duration'), scale=1000)
|
||||||
for source in player_params['sources']]))
|
for source in video_data['sources']]))
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
for translation in player_params.get('translations', []):
|
for translation in video_data.get('translations', []):
|
||||||
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
|
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
|
||||||
if lang_id not in subtitles:
|
if lang_id not in subtitles:
|
||||||
subtitles[lang_id] = []
|
subtitles[lang_id] = []
|
||||||
@@ -124,8 +186,9 @@ class AdobeTVVideoIE(InfoExtractor):
|
|||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'title': player_params['title'],
|
'title': video_data['title'],
|
||||||
'description': self._og_search_description(webpage),
|
'description': video_data.get('description'),
|
||||||
|
'thumbnail': video_data['video'].get('poster'),
|
||||||
'duration': duration,
|
'duration': duration,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
@@ -40,7 +41,8 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
|
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
|
||||||
'title': 'Rick and Morty - Pilot',
|
'title': 'Rick and Morty - Pilot',
|
||||||
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
|
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
|
||||||
}
|
},
|
||||||
|
'skip': 'This video is only available for registered users',
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
|
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
|
||||||
'playlist': [
|
'playlist': [
|
||||||
@@ -66,7 +68,7 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
'md5': '3e346a2ab0087d687a05e1e7f3b3e529',
|
'md5': '3e346a2ab0087d687a05e1e7f3b3e529',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0',
|
'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
|
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
|
||||||
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
|
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
|
||||||
},
|
},
|
||||||
@@ -77,6 +79,10 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
|
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
|
||||||
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
|
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
|
||||||
},
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
}]
|
}]
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -123,7 +129,6 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
else:
|
else:
|
||||||
collections = bootstrapped_data['show']['collections']
|
collections = bootstrapped_data['show']['collections']
|
||||||
collection, video_info = self.find_collection_containing_video(collections, episode_path)
|
collection, video_info = self.find_collection_containing_video(collections, episode_path)
|
||||||
|
|
||||||
# Video wasn't found in the collections, let's try `slugged_video`.
|
# Video wasn't found in the collections, let's try `slugged_video`.
|
||||||
if video_info is None:
|
if video_info is None:
|
||||||
if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
|
if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
|
||||||
@@ -133,7 +138,15 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
|
|
||||||
show = bootstrapped_data['show']
|
show = bootstrapped_data['show']
|
||||||
show_title = show['title']
|
show_title = show['title']
|
||||||
segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
|
stream = video_info.get('stream')
|
||||||
|
clips = [stream] if stream else video_info.get('clips')
|
||||||
|
if not clips:
|
||||||
|
raise ExtractorError(
|
||||||
|
'This video is only available via cable service provider subscription that'
|
||||||
|
' is not currently supported. You may want to use --cookies.'
|
||||||
|
if video_info.get('auth') is True else 'Unable to find stream or clips',
|
||||||
|
expected=True)
|
||||||
|
segment_ids = [clip['videoPlaybackID'] for clip in clips]
|
||||||
|
|
||||||
episode_id = video_info['id']
|
episode_id = video_info['id']
|
||||||
episode_title = video_info['title']
|
episode_title = video_info['title']
|
||||||
@@ -142,7 +155,7 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
for part_num, segment_id in enumerate(segment_ids):
|
for part_num, segment_id in enumerate(segment_ids):
|
||||||
segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id
|
segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id
|
||||||
|
|
||||||
segment_title = '%s - %s' % (show_title, episode_title)
|
segment_title = '%s - %s' % (show_title, episode_title)
|
||||||
if len(segment_ids) > 1:
|
if len(segment_ids) > 1:
|
||||||
@@ -156,19 +169,33 @@ class AdultSwimIE(InfoExtractor):
|
|||||||
xpath_text(idoc, './/trt', 'segment duration').strip())
|
xpath_text(idoc, './/trt', 'segment duration').strip())
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
file_els = idoc.findall('.//files/file')
|
file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')
|
||||||
|
|
||||||
|
unique_urls = []
|
||||||
|
unique_file_els = []
|
||||||
for file_el in file_els:
|
for file_el in file_els:
|
||||||
|
media_url = file_el.text
|
||||||
|
if not media_url or determine_ext(media_url) == 'f4m':
|
||||||
|
continue
|
||||||
|
if file_el.text not in unique_urls:
|
||||||
|
unique_urls.append(file_el.text)
|
||||||
|
unique_file_els.append(file_el)
|
||||||
|
|
||||||
|
for file_el in unique_file_els:
|
||||||
bitrate = file_el.attrib.get('bitrate')
|
bitrate = file_el.attrib.get('bitrate')
|
||||||
ftype = file_el.attrib.get('type')
|
ftype = file_el.attrib.get('type')
|
||||||
|
media_url = file_el.text
|
||||||
formats.append({
|
if determine_ext(media_url) == 'm3u8':
|
||||||
'format_id': '%s_%s' % (bitrate, ftype),
|
formats.extend(self._extract_m3u8_formats(
|
||||||
'url': file_el.text.strip(),
|
media_url, segment_title, 'mp4', preference=0,
|
||||||
# The bitrate may not be a number (for example: 'iphone')
|
m3u8_id='hls', fatal=False))
|
||||||
'tbr': int(bitrate) if bitrate.isdigit() else None,
|
else:
|
||||||
'quality': 1 if ftype == 'hd' else -1
|
formats.append({
|
||||||
})
|
'format_id': '%s_%s' % (bitrate, ftype),
|
||||||
|
'url': file_el.text.strip(),
|
||||||
|
# The bitrate may not be a number (for example: 'iphone')
|
||||||
|
'tbr': int(bitrate) if bitrate.isdigit() else None,
|
||||||
|
})
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
|||||||
87
youtube_dl/extractor/aenetworks.py
Normal file
87
youtube_dl/extractor/aenetworks.py
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
smuggle_url,
|
||||||
|
update_url_query,
|
||||||
|
unescapeHTML,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AENetworksIE(InfoExtractor):
|
||||||
|
IE_NAME = 'aenetworks'
|
||||||
|
IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?(?:(?:history|aetv|mylifetime)\.com|fyi\.tv)/(?P<type>[^/]+)/(?:[^/]+/)+(?P<id>[^/]+?)(?:$|[?#])'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'g12m5Gyt3fdR',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': "Bet You Didn't Know: Valentine's Day",
|
||||||
|
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
|
||||||
|
'timestamp': 1375819729,
|
||||||
|
'upload_date': '20130806',
|
||||||
|
'uploader': 'AENE-NEW',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'add_ie': ['ThePlatform'],
|
||||||
|
'expected_warnings': ['JSON-LD'],
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
|
||||||
|
'md5': '8ff93eb073449f151d6b90c0ae1ef0c7',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'eg47EERs_JsZ',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Winter Is Coming',
|
||||||
|
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
|
||||||
|
'timestamp': 1338306241,
|
||||||
|
'upload_date': '20120529',
|
||||||
|
'uploader': 'AENE-NEW',
|
||||||
|
},
|
||||||
|
'add_ie': ['ThePlatform'],
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.aetv.com/shows/duck-dynasty/video/inlawful-entry',
|
||||||
|
'only_matching': True
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.fyi.tv/shows/tiny-house-nation/videos/207-sq-ft-minnesota-prairie-cottage',
|
||||||
|
'only_matching': True
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.mylifetime.com/shows/project-runway-junior/video/season-1/episode-6/superstar-clients',
|
||||||
|
'only_matching': True
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
page_type, video_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
video_url_re = [
|
||||||
|
r'data-href="[^"]*/%s"[^>]+data-release-url="([^"]+)"' % video_id,
|
||||||
|
r"media_url\s*=\s*'([^']+)'"
|
||||||
|
]
|
||||||
|
video_url = unescapeHTML(self._search_regex(video_url_re, webpage, 'video url'))
|
||||||
|
query = {'mbr': 'true'}
|
||||||
|
if page_type == 'shows':
|
||||||
|
query['assetTypes'] = 'medium_video_s3'
|
||||||
|
if 'switch=hds' in video_url:
|
||||||
|
query['switch'] = 'hls'
|
||||||
|
|
||||||
|
info = self._search_json_ld(webpage, video_id, fatal=False)
|
||||||
|
info.update({
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': smuggle_url(
|
||||||
|
update_url_query(video_url, query),
|
||||||
|
{
|
||||||
|
'sig': {
|
||||||
|
'key': 'crazyjava',
|
||||||
|
'secret': 's3cr3t'},
|
||||||
|
'force_smil_url': True
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
return info
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# coding: utf-8
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
|
|
||||||
|
|
||||||
class AftenpostenIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/(?:#!/)?video/(?P<id>\d+)'
|
|
||||||
_TEST = {
|
|
||||||
'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
|
|
||||||
'md5': 'fd828cd29774a729bf4d4425fe192972',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '21039',
|
|
||||||
'ext': 'mov',
|
|
||||||
'title': 'TRAILER: "Sweatshop" - I can´t take any more',
|
|
||||||
'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
|
|
||||||
'timestamp': 1416927969,
|
|
||||||
'upload_date': '20141125',
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
return self.url_result('xstream:ap:%s' % self._match_id(url), 'Xstream')
|
|
||||||
@@ -6,7 +6,7 @@ from ..utils import int_or_none
|
|||||||
|
|
||||||
|
|
||||||
class AftonbladetIE(InfoExtractor):
|
class AftonbladetIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://tv\.aftonbladet\.se/abtv/articles/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://tv\.aftonbladet\.se/abtv/articles/(?P<id>[0-9]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://tv.aftonbladet.se/abtv/articles/36015',
|
'url': 'http://tv.aftonbladet.se/abtv/articles/36015',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
|||||||
@@ -20,14 +20,14 @@ class AirMozillaIE(InfoExtractor):
|
|||||||
'id': '6x4q2w',
|
'id': '6x4q2w',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
|
'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
|
||||||
'thumbnail': 're:https://\w+\.cloudfront\.net/6x4q2w/poster\.jpg\?t=\d+',
|
'thumbnail': 're:https?://vid\.ly/(?P<id>[0-9a-z-]+)/poster',
|
||||||
'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
|
'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
|
||||||
'timestamp': 1422487800,
|
'timestamp': 1422487800,
|
||||||
'upload_date': '20150128',
|
'upload_date': '20150128',
|
||||||
'location': 'SFO Commons',
|
'location': 'SFO Commons',
|
||||||
'duration': 3780,
|
'duration': 3780,
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'categories': ['Main'],
|
'categories': ['Main', 'Privacy'],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from .common import InfoExtractor
|
|||||||
|
|
||||||
|
|
||||||
class AlJazeeraIE(InfoExtractor):
|
class AlJazeeraIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'
|
_VALID_URL = r'https?://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
|
'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
|
||||||
@@ -13,23 +13,18 @@ class AlJazeeraIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'The Slum - Episode 1: Deliverance',
|
'title': 'The Slum - Episode 1: Deliverance',
|
||||||
'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
|
'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
|
||||||
'uploader': 'Al Jazeera English',
|
'uploader_id': '665003303001',
|
||||||
|
'timestamp': 1411116829,
|
||||||
|
'upload_date': '20140919',
|
||||||
},
|
},
|
||||||
'add_ie': ['Brightcove'],
|
'add_ie': ['BrightcoveNew'],
|
||||||
|
'skip': 'Not accessible from Travis CI server',
|
||||||
}
|
}
|
||||||
|
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/665003303001/default_default/index.html?videoId=%s'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
program_name = self._match_id(url)
|
program_name = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, program_name)
|
webpage = self._download_webpage(url, program_name)
|
||||||
brightcove_id = self._search_regex(
|
brightcove_id = self._search_regex(
|
||||||
r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')
|
r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')
|
||||||
|
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
|
||||||
return {
|
|
||||||
'_type': 'url',
|
|
||||||
'url': (
|
|
||||||
'brightcove:'
|
|
||||||
'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc'
|
|
||||||
'&%40videoPlayer={0}'.format(brightcove_id)
|
|
||||||
),
|
|
||||||
'ie_key': 'Brightcove',
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -8,6 +8,8 @@ from .common import InfoExtractor
|
|||||||
from ..compat import compat_str
|
from ..compat import compat_str
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
qualities,
|
qualities,
|
||||||
|
unescapeHTML,
|
||||||
|
xpath_element,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -31,7 +33,7 @@ class AllocineIE(InfoExtractor):
|
|||||||
'id': '19540403',
|
'id': '19540403',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Planes 2 Bande-annonce VF',
|
'title': 'Planes 2 Bande-annonce VF',
|
||||||
'description': 'md5:eeaffe7c2d634525e21159b93acf3b1e',
|
'description': 'Regardez la bande annonce du film Planes 2 (Planes 2 Bande-annonce VF). Planes 2, un film de Roberts Gannaway',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@@ -41,7 +43,7 @@ class AllocineIE(InfoExtractor):
|
|||||||
'id': '19544709',
|
'id': '19544709',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Dragons 2 - Bande annonce finale VF',
|
'title': 'Dragons 2 - Bande annonce finale VF',
|
||||||
'description': 'md5:71742e3a74b0d692c7fce0dd2017a4ac',
|
'description': 'md5:601d15393ac40f249648ef000720e7e3',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@@ -59,14 +61,18 @@ class AllocineIE(InfoExtractor):
|
|||||||
if typ == 'film':
|
if typ == 'film':
|
||||||
video_id = self._search_regex(r'href="/video/player_gen_cmedia=([0-9]+).+"', webpage, 'video id')
|
video_id = self._search_regex(r'href="/video/player_gen_cmedia=([0-9]+).+"', webpage, 'video id')
|
||||||
else:
|
else:
|
||||||
player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player')
|
player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player', default=None)
|
||||||
|
if player:
|
||||||
player_data = json.loads(player)
|
player_data = json.loads(player)
|
||||||
video_id = compat_str(player_data['refMedia'])
|
video_id = compat_str(player_data['refMedia'])
|
||||||
|
else:
|
||||||
|
model = self._search_regex(r'data-model="([^"]+)">', webpage, 'data model')
|
||||||
|
model_data = self._parse_json(unescapeHTML(model), display_id)
|
||||||
|
video_id = compat_str(model_data['id'])
|
||||||
|
|
||||||
xml = self._download_xml('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % video_id, display_id)
|
xml = self._download_xml('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % video_id, display_id)
|
||||||
|
|
||||||
video = xml.find('.//AcVisionVideo').attrib
|
video = xpath_element(xml, './/AcVisionVideo').attrib
|
||||||
quality = qualities(['ld', 'md', 'hd'])
|
quality = qualities(['ld', 'md', 'hd'])
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
|
|||||||
83
youtube_dl/extractor/amp.py
Normal file
83
youtube_dl/extractor/amp.py
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AMPIE(InfoExtractor):
|
||||||
|
# parse Akamai Adaptive Media Player feed
|
||||||
|
def _extract_feed_info(self, url):
|
||||||
|
item = self._download_json(
|
||||||
|
url, None, 'Downloading Akamai AMP feed',
|
||||||
|
'Unable to download Akamai AMP feed')['channel']['item']
|
||||||
|
|
||||||
|
video_id = item['guid']
|
||||||
|
|
||||||
|
def get_media_node(name, default=None):
|
||||||
|
media_name = 'media-%s' % name
|
||||||
|
media_group = item.get('media-group') or item
|
||||||
|
return media_group.get(media_name) or item.get(media_name) or item.get(name, default)
|
||||||
|
|
||||||
|
thumbnails = []
|
||||||
|
media_thumbnail = get_media_node('thumbnail')
|
||||||
|
if media_thumbnail:
|
||||||
|
if isinstance(media_thumbnail, dict):
|
||||||
|
media_thumbnail = [media_thumbnail]
|
||||||
|
for thumbnail_data in media_thumbnail:
|
||||||
|
thumbnail = thumbnail_data['@attributes']
|
||||||
|
thumbnails.append({
|
||||||
|
'url': self._proto_relative_url(thumbnail['url'], 'http:'),
|
||||||
|
'width': int_or_none(thumbnail.get('width')),
|
||||||
|
'height': int_or_none(thumbnail.get('height')),
|
||||||
|
})
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
media_subtitle = get_media_node('subTitle')
|
||||||
|
if media_subtitle:
|
||||||
|
if isinstance(media_subtitle, dict):
|
||||||
|
media_subtitle = [media_subtitle]
|
||||||
|
for subtitle_data in media_subtitle:
|
||||||
|
subtitle = subtitle_data['@attributes']
|
||||||
|
lang = subtitle.get('lang') or 'en'
|
||||||
|
subtitles[lang] = [{'url': subtitle['href']}]
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
media_content = get_media_node('content')
|
||||||
|
if isinstance(media_content, dict):
|
||||||
|
media_content = [media_content]
|
||||||
|
for media_data in media_content:
|
||||||
|
media = media_data['@attributes']
|
||||||
|
media_type = media['type']
|
||||||
|
if media_type in ('video/f4m', 'application/f4m+xml'):
|
||||||
|
formats.extend(self._extract_f4m_formats(
|
||||||
|
media['url'] + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
|
||||||
|
video_id, f4m_id='hds', fatal=False))
|
||||||
|
elif media_type == 'application/x-mpegURL':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
media['url'], video_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||||
|
else:
|
||||||
|
formats.append({
|
||||||
|
'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
|
||||||
|
'url': media['url'],
|
||||||
|
'tbr': int_or_none(media.get('bitrate')),
|
||||||
|
'filesize': int_or_none(media.get('fileSize')),
|
||||||
|
})
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
timestamp = parse_iso8601(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': get_media_node('title'),
|
||||||
|
'description': get_media_node('description'),
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
|
||||||
|
'subtitles': subtitles,
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
242
youtube_dl/extractor/animeondemand.py
Normal file
242
youtube_dl/extractor/animeondemand.py
Normal file
@@ -0,0 +1,242 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import (
|
||||||
|
compat_urlparse,
|
||||||
|
compat_str,
|
||||||
|
)
|
||||||
|
from ..utils import (
|
||||||
|
determine_ext,
|
||||||
|
extract_attributes,
|
||||||
|
ExtractorError,
|
||||||
|
sanitized_Request,
|
||||||
|
urlencode_postdata,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AnimeOnDemandIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
|
||||||
|
_LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
|
||||||
|
_APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
|
||||||
|
_NETRC_MACHINE = 'animeondemand'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.anime-on-demand.de/anime/161',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '161',
|
||||||
|
'title': 'Grimgar, Ashes and Illusions (OmU)',
|
||||||
|
'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 4,
|
||||||
|
}, {
|
||||||
|
# Film wording is used instead of Episode
|
||||||
|
'url': 'https://www.anime-on-demand.de/anime/39',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# Episodes without titles
|
||||||
|
'url': 'https://www.anime-on-demand.de/anime/162',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# ger/jap, Dub/OmU, account required
|
||||||
|
'url': 'https://www.anime-on-demand.de/anime/169',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _login(self):
|
||||||
|
(username, password) = self._get_login_info()
|
||||||
|
if username is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
login_page = self._download_webpage(
|
||||||
|
self._LOGIN_URL, None, 'Downloading login page')
|
||||||
|
|
||||||
|
if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page:
|
||||||
|
self.raise_geo_restricted(
|
||||||
|
'%s is only available in German-speaking countries of Europe' % self.IE_NAME)
|
||||||
|
|
||||||
|
login_form = self._form_hidden_inputs('new_user', login_page)
|
||||||
|
|
||||||
|
login_form.update({
|
||||||
|
'user[login]': username,
|
||||||
|
'user[password]': password,
|
||||||
|
})
|
||||||
|
|
||||||
|
post_url = self._search_regex(
|
||||||
|
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
|
||||||
|
'post url', default=self._LOGIN_URL, group='url')
|
||||||
|
|
||||||
|
if not post_url.startswith('http'):
|
||||||
|
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
|
||||||
|
|
||||||
|
request = sanitized_Request(
|
||||||
|
post_url, urlencode_postdata(login_form))
|
||||||
|
request.add_header('Referer', self._LOGIN_URL)
|
||||||
|
|
||||||
|
response = self._download_webpage(
|
||||||
|
request, None, 'Logging in as %s' % username)
|
||||||
|
|
||||||
|
if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
|
||||||
|
error = self._search_regex(
|
||||||
|
r'<p class="alert alert-danger">(.+?)</p>',
|
||||||
|
response, 'error', default=None)
|
||||||
|
if error:
|
||||||
|
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
||||||
|
raise ExtractorError('Unable to log in')
|
||||||
|
|
||||||
|
def _real_initialize(self):
|
||||||
|
self._login()
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
anime_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, anime_id)
|
||||||
|
|
||||||
|
if 'data-playlist=' not in webpage:
|
||||||
|
self._download_webpage(
|
||||||
|
self._APPLY_HTML5_URL, anime_id,
|
||||||
|
'Activating HTML5 beta', 'Unable to apply HTML5 beta')
|
||||||
|
webpage = self._download_webpage(url, anime_id)
|
||||||
|
|
||||||
|
csrf_token = self._html_search_meta(
|
||||||
|
'csrf-token', webpage, 'csrf token', fatal=True)
|
||||||
|
|
||||||
|
anime_title = self._html_search_regex(
|
||||||
|
r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
|
||||||
|
webpage, 'anime name')
|
||||||
|
anime_description = self._html_search_regex(
|
||||||
|
r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
|
||||||
|
webpage, 'anime description', default=None)
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
|
||||||
|
for num, episode_html in enumerate(re.findall(
|
||||||
|
r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', webpage), 1):
|
||||||
|
episodebox_title = self._search_regex(
|
||||||
|
(r'class="episodebox-title"[^>]+title=(["\'])(?P<title>.+?)\1',
|
||||||
|
r'class="episodebox-title"[^>]+>(?P<title>.+?)<'),
|
||||||
|
episode_html, 'episodebox title', default=None, group='title')
|
||||||
|
if not episodebox_title:
|
||||||
|
continue
|
||||||
|
|
||||||
|
episode_number = int(self._search_regex(
|
||||||
|
r'(?:Episode|Film)\s*(\d+)',
|
||||||
|
episodebox_title, 'episode number', default=num))
|
||||||
|
episode_title = self._search_regex(
|
||||||
|
r'(?:Episode|Film)\s*\d+\s*-\s*(.+)',
|
||||||
|
episodebox_title, 'episode title', default=None)
|
||||||
|
|
||||||
|
video_id = 'episode-%d' % episode_number
|
||||||
|
|
||||||
|
common_info = {
|
||||||
|
'id': video_id,
|
||||||
|
'series': anime_title,
|
||||||
|
'episode': episode_title,
|
||||||
|
'episode_number': episode_number,
|
||||||
|
}
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
|
||||||
|
for input_ in re.findall(
|
||||||
|
r'<input[^>]+class=["\'].*?streamstarter_html5[^>]+>', episode_html):
|
||||||
|
attributes = extract_attributes(input_)
|
||||||
|
playlist_urls = []
|
||||||
|
for playlist_key in ('data-playlist', 'data-otherplaylist'):
|
||||||
|
playlist_url = attributes.get(playlist_key)
|
||||||
|
if isinstance(playlist_url, compat_str) and re.match(
|
||||||
|
r'/?[\da-zA-Z]+', playlist_url):
|
||||||
|
playlist_urls.append(attributes[playlist_key])
|
||||||
|
if not playlist_urls:
|
||||||
|
continue
|
||||||
|
|
||||||
|
lang = attributes.get('data-lang')
|
||||||
|
lang_note = attributes.get('value')
|
||||||
|
|
||||||
|
for playlist_url in playlist_urls:
|
||||||
|
kind = self._search_regex(
|
||||||
|
r'videomaterialurl/\d+/([^/]+)/',
|
||||||
|
playlist_url, 'media kind', default=None)
|
||||||
|
format_id_list = []
|
||||||
|
if lang:
|
||||||
|
format_id_list.append(lang)
|
||||||
|
if kind:
|
||||||
|
format_id_list.append(kind)
|
||||||
|
if not format_id_list:
|
||||||
|
format_id_list.append(compat_str(num))
|
||||||
|
format_id = '-'.join(format_id_list)
|
||||||
|
format_note = ', '.join(filter(None, (kind, lang_note)))
|
||||||
|
request = sanitized_Request(
|
||||||
|
compat_urlparse.urljoin(url, playlist_url),
|
||||||
|
headers={
|
||||||
|
'X-Requested-With': 'XMLHttpRequest',
|
||||||
|
'X-CSRF-Token': csrf_token,
|
||||||
|
'Referer': url,
|
||||||
|
'Accept': 'application/json, text/javascript, */*; q=0.01',
|
||||||
|
})
|
||||||
|
playlist = self._download_json(
|
||||||
|
request, video_id, 'Downloading %s playlist JSON' % format_id,
|
||||||
|
fatal=False)
|
||||||
|
if not playlist:
|
||||||
|
continue
|
||||||
|
start_video = playlist.get('startvideo', 0)
|
||||||
|
playlist = playlist.get('playlist')
|
||||||
|
if not playlist or not isinstance(playlist, list):
|
||||||
|
continue
|
||||||
|
playlist = playlist[start_video]
|
||||||
|
title = playlist.get('title')
|
||||||
|
if not title:
|
||||||
|
continue
|
||||||
|
description = playlist.get('description')
|
||||||
|
for source in playlist.get('sources', []):
|
||||||
|
file_ = source.get('file')
|
||||||
|
if not file_:
|
||||||
|
continue
|
||||||
|
ext = determine_ext(file_)
|
||||||
|
format_id_list = [lang, kind]
|
||||||
|
if ext == 'm3u8':
|
||||||
|
format_id_list.append('hls')
|
||||||
|
elif source.get('type') == 'video/dash' or ext == 'mpd':
|
||||||
|
format_id_list.append('dash')
|
||||||
|
format_id = '-'.join(filter(None, format_id_list))
|
||||||
|
if ext == 'm3u8':
|
||||||
|
file_formats = self._extract_m3u8_formats(
|
||||||
|
file_, video_id, 'mp4',
|
||||||
|
entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)
|
||||||
|
elif source.get('type') == 'video/dash' or ext == 'mpd':
|
||||||
|
continue
|
||||||
|
file_formats = self._extract_mpd_formats(
|
||||||
|
file_, video_id, mpd_id=format_id, fatal=False)
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
for f in file_formats:
|
||||||
|
f.update({
|
||||||
|
'language': lang,
|
||||||
|
'format_note': format_note,
|
||||||
|
})
|
||||||
|
formats.extend(file_formats)
|
||||||
|
|
||||||
|
if formats:
|
||||||
|
self._sort_formats(formats)
|
||||||
|
f = common_info.copy()
|
||||||
|
f.update({
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'formats': formats,
|
||||||
|
})
|
||||||
|
entries.append(f)
|
||||||
|
|
||||||
|
# Extract teaser only when full episode is not available
|
||||||
|
if not formats:
|
||||||
|
m = re.search(
|
||||||
|
r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>Teaser<',
|
||||||
|
episode_html)
|
||||||
|
if m:
|
||||||
|
f = common_info.copy()
|
||||||
|
f.update({
|
||||||
|
'id': '%s-teaser' % f['id'],
|
||||||
|
'title': m.group('title'),
|
||||||
|
'url': compat_urlparse.urljoin(url, m.group('href')),
|
||||||
|
})
|
||||||
|
entries.append(f)
|
||||||
|
|
||||||
|
return self.playlist_result(entries, anime_id, anime_title, anime_description)
|
||||||
@@ -1,11 +1,9 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
from .nuevo import NuevoBaseIE
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
|
|
||||||
|
|
||||||
class AnitubeIE(InfoExtractor):
|
class AnitubeIE(NuevoBaseIE):
|
||||||
IE_NAME = 'anitube.se'
|
IE_NAME = 'anitube.se'
|
||||||
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
|
||||||
|
|
||||||
@@ -22,38 +20,11 @@ class AnitubeIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
key = self._html_search_regex(
|
key = self._search_regex(
|
||||||
r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key')
|
r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key')
|
||||||
|
|
||||||
config_xml = self._download_xml(
|
return self._extract_nuevo(
|
||||||
'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)
|
'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, video_id)
|
||||||
|
|
||||||
video_title = config_xml.find('title').text
|
|
||||||
thumbnail = config_xml.find('image').text
|
|
||||||
duration = float(config_xml.find('duration').text)
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
video_url = config_xml.find('file')
|
|
||||||
if video_url is not None:
|
|
||||||
formats.append({
|
|
||||||
'format_id': 'sd',
|
|
||||||
'url': video_url.text,
|
|
||||||
})
|
|
||||||
video_url = config_xml.find('filehd')
|
|
||||||
if video_url is not None:
|
|
||||||
formats.append({
|
|
||||||
'format_id': 'hd',
|
|
||||||
'url': video_url.text,
|
|
||||||
})
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': video_title,
|
|
||||||
'thumbnail': thumbnail,
|
|
||||||
'duration': duration,
|
|
||||||
'formats': formats
|
|
||||||
}
|
|
||||||
|
|||||||
224
youtube_dl/extractor/anvato.py
Normal file
224
youtube_dl/extractor/anvato.py
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
import random
|
||||||
|
import time
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..aes import aes_encrypt
|
||||||
|
from ..compat import compat_str
|
||||||
|
from ..utils import (
|
||||||
|
bytes_to_intlist,
|
||||||
|
determine_ext,
|
||||||
|
intlist_to_bytes,
|
||||||
|
int_or_none,
|
||||||
|
strip_jsonp,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def md5_text(s):
|
||||||
|
if not isinstance(s, compat_str):
|
||||||
|
s = compat_str(s)
|
||||||
|
return hashlib.md5(s.encode('utf-8')).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
class AnvatoIE(InfoExtractor):
|
||||||
|
# Copied from anvplayer.min.js
|
||||||
|
_ANVACK_TABLE = {
|
||||||
|
'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ',
|
||||||
|
'nbcu_nbcd_desktop_web_qa_1a6f01bdd0dc45a439043b694c8a031d': 'eSxJUbA2UUKBTXryyQ2d6NuM8oEqaPySvaPzfKNA',
|
||||||
|
'nbcu_nbcd_desktop_web_acc_eb2ff240a5d4ae9a63d4c297c32716b6c523a129': '89JR3RtUGbvKuuJIiKOMK0SoarLb5MUx8v89RcbP',
|
||||||
|
'nbcu_nbcd_watchvod_web_prod_e61107507180976724ec8e8319fe24ba5b4b60e1': 'Uc7dFt7MJ9GsBWB5T7iPvLaMSOt8BBxv4hAXk5vv',
|
||||||
|
'nbcu_nbcd_watchvod_web_qa_42afedba88a36203db5a4c09a5ba29d045302232': 'T12oDYVFP2IaFvxkmYMy5dKxswpLHtGZa4ZAXEi7',
|
||||||
|
'nbcu_nbcd_watchvod_web_acc_9193214448e2e636b0ffb78abacfd9c4f937c6ca': 'MmobcxUxMedUpohNWwXaOnMjlbiyTOBLL6d46ZpR',
|
||||||
|
'nbcu_local_monitor_web_acc_f998ad54eaf26acd8ee033eb36f39a7b791c6335': 'QvfIoPYrwsjUCcASiw3AIkVtQob2LtJHfidp9iWg',
|
||||||
|
'nbcu_cable_monitor_web_acc_a413759603e8bedfcd3c61b14767796e17834077': 'uwVPJLShvJWSs6sWEIuVem7MTF8A4IknMMzIlFto',
|
||||||
|
'nbcu_nbcd_mcpstage_web_qa_4c43a8f6e95a88dbb40276c0630ba9f693a63a4e': 'PxVYZVwjhgd5TeoPRxL3whssb5OUPnM3zyAzq8GY',
|
||||||
|
'nbcu_comcast_comcast_web_prod_074080762ad4ce956b26b43fb22abf153443a8c4': 'afnaRZfDyg1Z3WZHdupKfy6xrbAG2MHqe3VfuSwh',
|
||||||
|
'nbcu_comcast_comcast_web_qa_706103bb93ead3ef70b1de12a0e95e3c4481ade0': 'DcjsVbX9b3uoPlhdriIiovgFQZVxpISZwz0cx1ZK',
|
||||||
|
'nbcu_comcast_comcastcable_web_prod_669f04817536743563d7331c9293e59fbdbe3d07': '0RwMN2cWy10qhAhOscq3eK7aEe0wqnKt3vJ0WS4D',
|
||||||
|
'nbcu_comcast_comcastcable_web_qa_3d9d2d66219094127f0f6b09cc3c7bb076e3e1ca': '2r8G9DEya7PCqBceKZgrn2XkXgASjwLMuaFE1Aad',
|
||||||
|
'hearst_hearst_demo_web_stage_960726dfef3337059a01a78816e43b29ec04dfc7': 'cuZBPXTR6kSdoTCVXwk5KGA8rk3NrgGn4H6e9Dsp',
|
||||||
|
'anvato_mcpqa_demo_web_stage_18b55e00db5a13faa8d03ae6e41f6f5bcb15b922': 'IOaaLQ8ymqVyem14QuAvE5SndQynTcH5CrLkU2Ih',
|
||||||
|
'anvato_nextmedia_demo_web_stage_9787d56a02ff6b9f43e9a2b0920d8ca88beb5818': 'Pqu9zVzI1ApiIzbVA3VkGBEQHvdKSUuKpD6s2uaR',
|
||||||
|
'anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a': 'du1ccmn7RxzgizwbWU7hyUaGodNlJn7HtXI0WgXW',
|
||||||
|
'anvato_scripps_app_web_stage_360797e00fe2826be142155c4618cc52fce6c26c': '2PMrQ0BRoqCWl7nzphj0GouIMEh2mZYivAT0S1Su',
|
||||||
|
'fs2go_fs2go_go_all_prod_21934911ccfafc03a075894ead2260d11e2ddd24': 'RcuHlKikW2IJw6HvVoEkqq2UsuEJlbEl11pWXs4Q',
|
||||||
|
'fs2go_fs2go_go_web_prod_ead4b0eec7460c1a07783808db21b49cf1f2f9a7': '4K0HTT2u1zkQA2MaGaZmkLa1BthGSBdr7jllrhk5',
|
||||||
|
'fs2go_fs2go_go_web_stage_407585454a4400355d4391691c67f361': 'ftnc37VKRJBmHfoGGi3kT05bHyeJzilEzhKJCyl3',
|
||||||
|
'fs2go_fs2go_go_android_stage_44b714db6f8477f29afcba15a41e1d30': 'CtxpPvVpo6AbZGomYUhkKs7juHZwNml9b9J0J2gI',
|
||||||
|
'anvato_cbslocal_app_web_prod_547f3e49241ef0e5d30c79b2efbca5d92c698f67': 'Pw0XX5KBDsyRnPS0R2JrSrXftsy8Jnz5pAjaYC8s',
|
||||||
|
'anvato_cbslocal_app_web_stage_547a5f096594cd3e00620c6f825cad1096d28c80': '37OBUhX2uwNyKhhrNzSSNHSRPZpApC3trdqDBpuz',
|
||||||
|
'fs2go_att_att_web_prod_1042dddd089a05438b6a08f972941176f699ffd8': 'JLcF20JwYvpv6uAGcLWIaV12jKwaL1R8us4b6Zkg',
|
||||||
|
'fs2go_att_att_web_stage_807c5001955fc114a3331fe027ddc76e': 'gbu1oO1y0JiOFh4SUipt86P288JHpyjSqolrrT1x',
|
||||||
|
'fs2go_fs2go_tudor_web_prod_a7dd8e5a7cdc830cae55eae6f3e9fee5ee49eb9b': 'ipcp87VCEZXPPe868j3orLqzc03oTy7DXsGkAXXH',
|
||||||
|
'anvato_mhz_app_web_prod_b808218b30de7fdf60340cbd9831512bc1bf6d37': 'Stlm5Gs6BEhJLRTZHcNquyzxGqr23EuFmE5DCgjX',
|
||||||
|
'fs2go_charter_charter_web_stage_c2c6e5a68375a1bf00fff213d3ff8f61a835a54c': 'Lz4hbJp1fwL6jlcz4M2PMzghM4jp4aAmybtT5dPc',
|
||||||
|
'fs2go_charter_charter_web_prod_ebfe3b10f1af215a7321cd3d629e0b81dfa6fa8c': 'vUJsK345A1bVmyYDRhZX0lqFIgVXuqhmuyp1EtPK',
|
||||||
|
'anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b': 'GDKq1ixvX3MoBNdU5IOYmYa2DTUXYOozPjrCJnW7',
|
||||||
|
'anvato_epfox_app_web_stage_a3c2ce60f8f83ef374a88b68ee73a950f8ab87ce': '2jz2NH4BsXMaDsoJ5qkHMbcczAfIReo2eFYuVC1C',
|
||||||
|
'fs2go_verizon_verizon_web_stage_08e6df0354a4803f1b1f2428b5a9a382e8dbcd62': 'rKTVapNaAcmnUbGL4ZcuOoY4SE7VmZSQsblPFr7e',
|
||||||
|
'fs2go_verizon_verizon_web_prod_f909564cb606eff1f731b5e22e0928676732c445': 'qLSUuHerM3u9eNPzaHyUK52obai5MvE4XDJfqYe1',
|
||||||
|
'fs2go_foxcom_synd_web_stage_f7b9091f00ea25a4fdaaae77fca5b54cdc7e7043': '96VKF2vLd24fFiDfwPFpzM5llFN4TiIGAlodE0Re',
|
||||||
|
'fs2go_foxcom_synd_web_prod_0f2cdd64d87e4ab6a1d54aada0ff7a7c8387a064': 'agiPjbXEyEZUkbuhcnmVPhe9NNVbDjCFq2xkcx51',
|
||||||
|
'anvato_own_app_web_stage_1214ade5d28422c4dae9d03c1243aba0563c4dba': 'mzhamNac3swG4WsJAiUTacnGIODi6SWeVWk5D7ho',
|
||||||
|
'anvato_own_app_web_prod_944e162ed927ec3e9ed13eb68ed2f1008ee7565e': '9TSxh6G2TXOLBoYm9ro3LdNjjvnXpKb8UR8KoIP9',
|
||||||
|
'anvato_scripps_app_ftv_prod_a10a10468edd5afb16fb48171c03b956176afad1': 'COJ2i2UIPK7xZqIWswxe7FaVBOVgRkP1F6O6qGoH',
|
||||||
|
'anvato_scripps_app_ftv_stage_77d3ad2bdb021ec37ca2e35eb09acd396a974c9a': 'Q7nnopNLe2PPfGLOTYBqxSaRpl209IhqaEuDZi1F',
|
||||||
|
'anvato_univision_app_web_stage_551236ef07a0e17718c3995c35586b5ed8cb5031': 'D92PoLS6UitwxDRA191HUGT9OYcOjV6mPMa5wNyo',
|
||||||
|
'anvato_univision_app_web_prod_039a5c0a6009e637ae8ac906718a79911e0e65e1': '5mVS5u4SQjtw6NGw2uhMbKEIONIiLqRKck5RwQLR',
|
||||||
|
'nbcu_cnbc_springfield_ios_prod_670207fae43d6e9a94c351688851a2ce': 'M7fqCCIP9lW53oJbHs19OlJlpDrVyc2OL8gNeuTa',
|
||||||
|
'nbcu_cnbc_springfieldvod_ios_prod_7a5f04b1ceceb0e9c9e2264a44aa236e08e034c2': 'Yia6QbJahW0S7K1I0drksimhZb4UFq92xLBmmMvk',
|
||||||
|
'anvato_cox_app_web_prod_ce45cda237969f93e7130f50ee8bb6280c1484ab': 'cc0miZexpFtdoqZGvdhfXsLy7FXjRAOgb9V0f5fZ',
|
||||||
|
'anvato_cox_app_web_stage_c23dbe016a8e9d8c7101d10172b92434f6088bf9': 'yivU3MYHd2eDZcOfmLbINVtqxyecKTOp8OjOuoGJ',
|
||||||
|
'anvato_chnzero_app_web_stage_b1164d1352b579e792e542fddf13ee34c0eeb46b': 'A76QkXMmVH8lTCfU15xva1mZnSVcqeY4Xb22Kp7m',
|
||||||
|
'anvato_chnzero_app_web_prod_253d358928dc08ec161eda2389d53707288a730c': 'OA5QI3ZWZZkdtUEDqh28AH8GedsF6FqzJI32596b',
|
||||||
|
'anvato_discovery_vodpoc_web_stage_9fa7077b5e8af1f8355f65d4fb8d2e0e9d54e2b7': 'q3oT191tTQ5g3JCP67PkjLASI9s16DuWZ6fYmry3',
|
||||||
|
'anvato_discovery_vodpoc_web_prod_688614983167a1af6cdf6d76343fda10a65223c1': 'qRvRQCTVHd0VVOHsMvvfidyWmlYVrTbjby7WqIuK',
|
||||||
|
'nbcu_cnbc_springfieldvod_ftv_stage_826040aad1925a46ac5dfb4b3c5143e648c6a30d': 'JQaSb5a8Tz0PT4ti329DNmzDO30TnngTHmvX8Vua',
|
||||||
|
'nbcu_cnbc_springfield_ftv_stage_826040aad1925a46ac5dfb4b3c5143e648c6a30d': 'JQaSb5a8Tz0PT4ti329DNmzDO30TnngTHmvX8Vua',
|
||||||
|
'nbcu_nbcd_capture_web_stage_4dd9d585bfb984ebf856dee35db027b2465cc4ae': '0j1Ov4Vopyi2HpBZJYdL2m8ERJVGYh3nNpzPiO8F',
|
||||||
|
'nbcu_nbcd_watch3_android_prod_7712ca5fcf1c22f19ec1870a9650f9c37db22dcf': '3LN2UB3rPUAMu7ZriWkHky9vpLMXYha8JbSnxBlx',
|
||||||
|
'nbcu_nbcd_watchvod3_android_prod_0910a3a4692d57c0b5ff4316075bc5d096be45b9': 'mJagcQ2II30vUOAauOXne7ERwbf5S9nlB3IP17lQ',
|
||||||
|
'anvato_scripps_app_atv_prod_790deda22e16e71e83df58f880cd389908a45d52': 'CB6trI1mpoDIM5o54DNTsji90NDBQPZ4z4RqBNSH',
|
||||||
|
'nbcu_nbcd_watchv4_android_prod_ff67cef9cb409158c6f8c3533edddadd0b750507': 'j8CHQCUWjlYERj4NFRmUYOND85QNbHViH09UwuKm',
|
||||||
|
'nbcu_nbcd_watchvodv4_android_prod_a814d781609989dea6a629d50ae4c7ad8cc8e907': 'rkVnUXxdA9rawVLUlDQtMue9Y4Q7lFEaIotcUhjt',
|
||||||
|
'rvVKpA50qlOPLFxMjrCGf5pdkdQDm7qn': '1J7ZkY5Qz5lMLi93QOH9IveE7EYB3rLl',
|
||||||
|
'nbcu_dtv_local_web_prod_b266cf49defe255fd4426a97e27c09e513e9f82f': 'HuLnJDqzLa4saCzYMJ79zDRSQpEduw1TzjMNQu2b',
|
||||||
|
'nbcu_att_local_web_prod_4cef038b2d969a6b7d700a56a599040b6a619f67': 'Q0Em5VDc2KpydUrVwzWRXAwoNBulWUxCq2faK0AV',
|
||||||
|
'nbcu_dish_local_web_prod_c56dcaf2da2e9157a4266c82a78195f1dd570f6b': 'bC1LWmRz9ayj2AlzizeJ1HuhTfIaJGsDBnZNgoRg',
|
||||||
|
'nbcu_verizon_local_web_prod_88bebd2ce006d4ed980de8133496f9a74cb9b3e1': 'wzhDKJZpgvUSS1EQvpCQP8Q59qVzcPixqDGJefSk',
|
||||||
|
'nbcu_charter_local_web_prod_9ad90f7fc4023643bb718f0fe0fd5beea2382a50': 'PyNbxNhEWLzy1ZvWEQelRuIQY88Eub7xbSVRMdfT',
|
||||||
|
'nbcu_suddenlink_local_web_prod_20fb711725cac224baa1c1cb0b1c324d25e97178': '0Rph41lPXZbb3fqeXtHjjbxfSrNbtZp1Ygq7Jypa',
|
||||||
|
'nbcu_wow_local_web_prod_652d9ce4f552d9c2e7b5b1ed37b8cb48155174ad': 'qayIBZ70w1dItm2zS42AptXnxW15mkjRrwnBjMPv',
|
||||||
|
'nbcu_centurylink_local_web_prod_2034402b029bf3e837ad46814d9e4b1d1345ccd5': 'StePcPMkjsX51PcizLdLRMzxMEl5k2FlsMLUNV4k',
|
||||||
|
'nbcu_atlanticbrd_local_web_prod_8d5f5ecbf7f7b2f5e6d908dd75d90ae3565f682e': 'NtYLb4TFUS0pRs3XTkyO5sbVGYjVf17bVbjaGscI',
|
||||||
|
'nbcu_nbcd_watchvod_web_dev_08bc05699be47c4f31d5080263a8cfadc16d0f7c': 'hwxi2dgDoSWgfmVVXOYZm14uuvku4QfopstXckhr',
|
||||||
|
'anvato_nextmedia_app_web_prod_a4fa8c7204aa65e71044b57aaf63711980cfe5a0': 'tQN1oGPYY1nM85rJYePWGcIb92TG0gSqoVpQTWOw',
|
||||||
|
'anvato_mcp_lin_web_prod_4c36fbfd4d8d8ecae6488656e21ac6d1ac972749': 'GUXNf5ZDX2jFUpu4WT2Go4DJ5nhUCzpnwDRRUx1K',
|
||||||
|
'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa': 'bLDYF8JqfG42b7bwKEgQiU9E2LTIAtnKzSgYpFUH',
|
||||||
|
'anvato_mcp_fs2go_web_prod_c7b90a93e171469cdca00a931211a2f556370d0a': 'icgGoYGipQMMSEvhplZX1pwbN69srwKYWksz3xWK',
|
||||||
|
'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336': 'fA2iQdI7RDpynqzQYIpXALVS83NTPr8LLFK4LFsu',
|
||||||
|
'anvato_mcp_anv_web_prod_791407490f4c1ef2a4bcb21103e0cb1bcb3352b3': 'rMOUZqe9lwcGq2mNgG3EDusm6lKgsUnczoOX3mbg',
|
||||||
|
'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900': 'rMOUZqe9lwcGq2mNgG3EDusm6lKgsUnczoOX3mbg',
|
||||||
|
'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99': 'P3uXJ0fXXditBPCGkfvlnVScpPEfKmc64Zv7ZgbK',
|
||||||
|
'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe': 'mGPvo5ZA5SgjOFAPEPXv7AnOpFUICX8hvFQVz69n',
|
||||||
|
'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582': 'qyT6PXXLjVNCrHaRVj0ugAhalNRS7Ee9BP7LUokD',
|
||||||
|
'nbcu_nbcd_watchvodv4_web_stage_4108362fba2d4ede21f262fea3c4162cbafd66c7': 'DhaU5lj0W2gEdcSSsnxURq8t7KIWtJfD966crVDk',
|
||||||
|
'anvato_scripps_app_ios_prod_409c41960c60b308db43c3cc1da79cab9f1c3d93': 'WPxj5GraLTkYCyj3M7RozLqIycjrXOEcDGFMIJPn',
|
||||||
|
'EZqvRyKBJLrgpClDPDF8I7Xpdp40Vx73': '4OxGd2dEakylntVKjKF0UK9PDPYB6A9W',
|
||||||
|
'M2v78QkpleXm9hPp9jUXI63x5vA6BogR': 'ka6K32k7ZALmpINkjJUGUo0OE42Md1BQ',
|
||||||
|
'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ'
|
||||||
|
}
|
||||||
|
|
||||||
|
_AUTH_KEY = b'\x31\xc2\x42\x84\x9e\x73\xa0\xce'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super(AnvatoIE, self).__init__(*args, **kwargs)
|
||||||
|
self.__server_time = None
|
||||||
|
|
||||||
|
def _server_time(self, access_key, video_id):
|
||||||
|
if self.__server_time is not None:
|
||||||
|
return self.__server_time
|
||||||
|
|
||||||
|
self.__server_time = int(self._download_json(
|
||||||
|
self._api_prefix(access_key) + 'server_time?anvack=' + access_key, video_id,
|
||||||
|
note='Fetching server time')['server_time'])
|
||||||
|
|
||||||
|
return self.__server_time
|
||||||
|
|
||||||
|
def _api_prefix(self, access_key):
|
||||||
|
return 'https://tkx2-%s.anvato.net/rest/v2/' % ('prod' if 'prod' in access_key else 'stage')
|
||||||
|
|
||||||
|
def _get_video_json(self, access_key, video_id):
|
||||||
|
# See et() in anvplayer.min.js, which is an alias of getVideoJSON()
|
||||||
|
video_data_url = self._api_prefix(access_key) + 'mcp/video/%s?anvack=%s' % (video_id, access_key)
|
||||||
|
server_time = self._server_time(access_key, video_id)
|
||||||
|
input_data = '%d~%s~%s' % (server_time, md5_text(video_data_url), md5_text(server_time))
|
||||||
|
|
||||||
|
auth_secret = intlist_to_bytes(aes_encrypt(
|
||||||
|
bytes_to_intlist(input_data[:64]), bytes_to_intlist(self._AUTH_KEY)))
|
||||||
|
|
||||||
|
video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii')
|
||||||
|
anvrid = md5_text(time.time() * 1000 * random.random())[:30]
|
||||||
|
payload = {
|
||||||
|
'api': {
|
||||||
|
'anvrid': anvrid,
|
||||||
|
'anvstk': md5_text('%s|%s|%d|%s' % (
|
||||||
|
access_key, anvrid, server_time, self._ANVACK_TABLE[access_key])),
|
||||||
|
'anvts': server_time,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return self._download_json(
|
||||||
|
video_data_url, video_id, transform_source=strip_jsonp,
|
||||||
|
data=json.dumps(payload).encode('utf-8'))
|
||||||
|
|
||||||
|
def _extract_anvato_videos(self, webpage, video_id):
|
||||||
|
anvplayer_data = self._parse_json(self._html_search_regex(
|
||||||
|
r'<script[^>]+data-anvp=\'([^\']+)\'', webpage,
|
||||||
|
'Anvato player data'), video_id)
|
||||||
|
|
||||||
|
video_id = anvplayer_data['video']
|
||||||
|
access_key = anvplayer_data['accessKey']
|
||||||
|
|
||||||
|
video_data = self._get_video_json(access_key, video_id)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for published_url in video_data['published_urls']:
|
||||||
|
video_url = published_url['embed_url']
|
||||||
|
ext = determine_ext(video_url)
|
||||||
|
|
||||||
|
if ext == 'smil':
|
||||||
|
formats.extend(self._extract_smil_formats(video_url, video_id))
|
||||||
|
continue
|
||||||
|
|
||||||
|
tbr = int_or_none(published_url.get('kbps'))
|
||||||
|
a_format = {
|
||||||
|
'url': video_url,
|
||||||
|
'format_id': ('-'.join(filter(None, ['http', published_url.get('cdn_name')]))).lower(),
|
||||||
|
'tbr': tbr if tbr != 0 else None,
|
||||||
|
}
|
||||||
|
|
||||||
|
if ext == 'm3u8':
|
||||||
|
# Not using _extract_m3u8_formats here as individual media
|
||||||
|
# playlists are also included in published_urls.
|
||||||
|
if tbr is None:
|
||||||
|
formats.append(self._m3u8_meta_format(video_url, ext='mp4', m3u8_id='hls'))
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
a_format.update({
|
||||||
|
'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])),
|
||||||
|
'ext': 'mp4',
|
||||||
|
})
|
||||||
|
elif ext == 'mp3':
|
||||||
|
a_format['vcodec'] = 'none'
|
||||||
|
else:
|
||||||
|
a_format.update({
|
||||||
|
'width': int_or_none(published_url.get('width')),
|
||||||
|
'height': int_or_none(published_url.get('height')),
|
||||||
|
})
|
||||||
|
formats.append(a_format)
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
for caption in video_data.get('captions', []):
|
||||||
|
a_caption = {
|
||||||
|
'url': caption['url'],
|
||||||
|
'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None
|
||||||
|
}
|
||||||
|
subtitles.setdefault(caption['language'], []).append(a_caption)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'formats': formats,
|
||||||
|
'title': video_data.get('def_title'),
|
||||||
|
'description': video_data.get('def_description'),
|
||||||
|
'categories': video_data.get('categories'),
|
||||||
|
'thumbnail': video_data.get('thumbnail'),
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
@@ -1,70 +1,133 @@
|
|||||||
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
int_or_none,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class AolIE(InfoExtractor):
|
class AolIE(InfoExtractor):
|
||||||
IE_NAME = 'on.aol.com'
|
IE_NAME = 'on.aol.com'
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'(?:aol-video:|https?://on\.aol\.com/(?:[^/]+/)*(?:[^/?#&]+-)?)(?P<id>[^/?#&]+)'
|
||||||
(?:
|
|
||||||
aol-video:|
|
|
||||||
http://on\.aol\.com/
|
|
||||||
(?:
|
|
||||||
video/.*-|
|
|
||||||
playlist/(?P<playlist_display_id>[^/?#]+?)-(?P<playlist_id>[0-9]+)[?#].*_videoid=
|
|
||||||
)
|
|
||||||
)
|
|
||||||
(?P<id>[0-9]+)
|
|
||||||
(?:$|\?)
|
|
||||||
'''
|
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
# video with 5min ID
|
||||||
'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
|
'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
|
||||||
'md5': '18ef68f48740e86ae94b98da815eec42',
|
'md5': '18ef68f48740e86ae94b98da815eec42',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '518167793',
|
'id': '518167793',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
|
'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
|
||||||
|
'description': 'A major phone scam has cost thousands of taxpayers more than $1 million, with less than a month until income tax returns are due to the IRS.',
|
||||||
|
'timestamp': 1395405060,
|
||||||
|
'upload_date': '20140321',
|
||||||
|
'uploader': 'Newsy Studio',
|
||||||
},
|
},
|
||||||
'add_ie': ['FiveMin'],
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316',
|
# video with vidible ID
|
||||||
|
'url': 'http://on.aol.com/video/netflix-is-raising-rates-5707d6b8e4b090497b04f706?context=PC:homepage:PL1944:1460189336183',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '152147',
|
'id': '5707d6b8e4b090497b04f706',
|
||||||
'title': 'Brace Yourself - Today\'s Weirdest News',
|
'ext': 'mp4',
|
||||||
|
'title': 'Netflix is Raising Rates',
|
||||||
|
'description': 'Netflix is rewarding millions of it’s long-standing members with an increase in cost. Veuer’s Carly Figueroa has more.',
|
||||||
|
'upload_date': '20160408',
|
||||||
|
'timestamp': 1460123280,
|
||||||
|
'uploader': 'Veuer',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 10,
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://on.aol.com/partners/abc-551438d309eab105804dbfe8/sneak-peek-was-haley-really-framed-570eaebee4b0448640a5c944',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://on.aol.com/shows/park-bench-shw518173474-559a1b9be4b0c3bfad3357a7?context=SH:SHW518173474:PL4327:1460619712763',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://on.aol.com/video/519442220',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'aol-video:5707d6b8e4b090497b04f706',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
playlist_id = mobj.group('playlist_id')
|
|
||||||
if not playlist_id or self._downloader.params.get('noplaylist'):
|
|
||||||
return self.url_result('5min:%s' % video_id)
|
|
||||||
|
|
||||||
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
|
response = self._download_json(
|
||||||
|
'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/%s/details' % video_id,
|
||||||
|
video_id)['response']
|
||||||
|
if response['statusText'] != 'Ok':
|
||||||
|
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusText']), expected=True)
|
||||||
|
|
||||||
webpage = self._download_webpage(url, playlist_id)
|
video_data = response['data']
|
||||||
title = self._html_search_regex(
|
formats = []
|
||||||
r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title')
|
m3u8_url = video_data.get('videoMasterPlaylist')
|
||||||
playlist_html = self._search_regex(
|
if m3u8_url:
|
||||||
r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage,
|
formats.extend(self._extract_m3u8_formats(
|
||||||
'playlist HTML')
|
m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||||
entries = [{
|
for rendition in video_data.get('renditions', []):
|
||||||
'_type': 'url',
|
video_url = rendition.get('url')
|
||||||
'url': 'aol-video:%s' % m.group('id'),
|
if not video_url:
|
||||||
'ie_key': 'Aol',
|
continue
|
||||||
} for m in re.finditer(
|
ext = rendition.get('format')
|
||||||
r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>",
|
if ext == 'm3u8':
|
||||||
playlist_html)]
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||||
|
else:
|
||||||
|
f = {
|
||||||
|
'url': video_url,
|
||||||
|
'format_id': rendition.get('quality'),
|
||||||
|
}
|
||||||
|
mobj = re.search(r'(\d+)x(\d+)', video_url)
|
||||||
|
if mobj:
|
||||||
|
f.update({
|
||||||
|
'width': int(mobj.group(1)),
|
||||||
|
'height': int(mobj.group(2)),
|
||||||
|
})
|
||||||
|
formats.append(f)
|
||||||
|
self._sort_formats(formats, ('width', 'height', 'tbr', 'format_id'))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'_type': 'playlist',
|
'id': video_id,
|
||||||
'id': playlist_id,
|
'title': video_data['title'],
|
||||||
'display_id': mobj.group('playlist_display_id'),
|
'duration': int_or_none(video_data.get('duration')),
|
||||||
'title': title,
|
'timestamp': int_or_none(video_data.get('publishDate')),
|
||||||
'entries': entries,
|
'view_count': int_or_none(video_data.get('views')),
|
||||||
|
'description': video_data.get('description'),
|
||||||
|
'uploader': video_data.get('videoOwner'),
|
||||||
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class AolFeaturesIE(InfoExtractor):
|
||||||
|
IE_NAME = 'features.aol.com'
|
||||||
|
_VALID_URL = r'https?://features\.aol\.com/video/(?P<id>[^/?#]+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://features.aol.com/video/behind-secret-second-careers-late-night-talk-show-hosts',
|
||||||
|
'md5': '7db483bb0c09c85e241f84a34238cc75',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '519507715',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'What To Watch - February 17, 2016',
|
||||||
|
},
|
||||||
|
'add_ie': ['FiveMin'],
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
return self.url_result(self._search_regex(
|
||||||
|
r'<script type="text/javascript" src="(https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js[^"]+)"',
|
||||||
|
webpage, '5min embed url'), 'FiveMin')
|
||||||
|
|||||||
50
youtube_dl/extractor/appleconnect.py
Normal file
50
youtube_dl/extractor/appleconnect.py
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
str_to_int,
|
||||||
|
ExtractorError
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AppleConnectIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
|
||||||
|
'md5': '10d0f2799111df4cb1c924520ca78f98',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
|
||||||
|
'ext': 'm4v',
|
||||||
|
'title': 'Energy',
|
||||||
|
'uploader': 'Drake',
|
||||||
|
'thumbnail': 'http://is5.mzstatic.com/image/thumb/Video5/v4/78/61/c5/7861c5fa-ad6d-294b-1464-cf7605b911d6/source/1920x1080sr.jpg',
|
||||||
|
'upload_date': '20150710',
|
||||||
|
'timestamp': 1436545535,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
try:
|
||||||
|
video_json = self._html_search_regex(
|
||||||
|
r'class="auc-video-data">(\{.*?\})', webpage, 'json')
|
||||||
|
except ExtractorError:
|
||||||
|
raise ExtractorError('This post doesn\'t contain a video', expected=True)
|
||||||
|
|
||||||
|
video_data = self._parse_json(video_json, video_id)
|
||||||
|
timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp'))
|
||||||
|
like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count'))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'url': video_data['sslSrc'],
|
||||||
|
'title': video_data['title'],
|
||||||
|
'description': video_data['description'],
|
||||||
|
'uploader': video_data['artistName'],
|
||||||
|
'thumbnail': video_data['artworkUrl'],
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'like_count': like_count,
|
||||||
|
}
|
||||||
@@ -11,61 +11,71 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class AppleTrailersIE(InfoExtractor):
|
class AppleTrailersIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
|
IE_NAME = 'appletrailers'
|
||||||
|
_VALID_URL = r'https?://(?:www\.|movie)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
"url": "http://trailers.apple.com/trailers/wb/manofsteel/",
|
'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'manofsteel',
|
'id': 'manofsteel',
|
||||||
},
|
},
|
||||||
"playlist": [
|
'playlist': [
|
||||||
{
|
{
|
||||||
"md5": "d97a8e575432dbcb81b7c3acb741f8a8",
|
'md5': 'd97a8e575432dbcb81b7c3acb741f8a8',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "manofsteel-trailer4",
|
'id': 'manofsteel-trailer4',
|
||||||
"ext": "mov",
|
'ext': 'mov',
|
||||||
"duration": 111,
|
'duration': 111,
|
||||||
"title": "Trailer 4",
|
'title': 'Trailer 4',
|
||||||
"upload_date": "20130523",
|
'upload_date': '20130523',
|
||||||
"uploader_id": "wb",
|
'uploader_id': 'wb',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "b8017b7131b721fb4e8d6f49e1df908c",
|
'md5': 'b8017b7131b721fb4e8d6f49e1df908c',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "manofsteel-trailer3",
|
'id': 'manofsteel-trailer3',
|
||||||
"ext": "mov",
|
'ext': 'mov',
|
||||||
"duration": 182,
|
'duration': 182,
|
||||||
"title": "Trailer 3",
|
'title': 'Trailer 3',
|
||||||
"upload_date": "20130417",
|
'upload_date': '20130417',
|
||||||
"uploader_id": "wb",
|
'uploader_id': 'wb',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "d0f1e1150989b9924679b441f3404d48",
|
'md5': 'd0f1e1150989b9924679b441f3404d48',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "manofsteel-trailer",
|
'id': 'manofsteel-trailer',
|
||||||
"ext": "mov",
|
'ext': 'mov',
|
||||||
"duration": 148,
|
'duration': 148,
|
||||||
"title": "Trailer",
|
'title': 'Trailer',
|
||||||
"upload_date": "20121212",
|
'upload_date': '20121212',
|
||||||
"uploader_id": "wb",
|
'uploader_id': 'wb',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "5fe08795b943eb2e757fa95cb6def1cb",
|
'md5': '5fe08795b943eb2e757fa95cb6def1cb',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "manofsteel-teaser",
|
'id': 'manofsteel-teaser',
|
||||||
"ext": "mov",
|
'ext': 'mov',
|
||||||
"duration": 93,
|
'duration': 93,
|
||||||
"title": "Teaser",
|
'title': 'Teaser',
|
||||||
"upload_date": "20120721",
|
'upload_date': '20120721',
|
||||||
"uploader_id": "wb",
|
'uploader_id': 'wb',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
|
}, {
|
||||||
|
'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'blackthorn',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 2,
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://trailers.apple.com/ca/metropole/autrui/',
|
'url': 'http://trailers.apple.com/ca/metropole/autrui/',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://movietrailers.apple.com/trailers/focus_features/kuboandthetwostrings/',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_JSON_RE = r'iTunes.playURL\((.*?)\);'
|
_JSON_RE = r'iTunes.playURL\((.*?)\);'
|
||||||
@@ -79,7 +89,7 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
|
|
||||||
def fix_html(s):
|
def fix_html(s):
|
||||||
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
|
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
|
||||||
s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
|
s = re.sub(r'<img ([^<]*?)/?>', r'<img \1/>', s)
|
||||||
# The ' in the onClick attributes are not escaped, it couldn't be parsed
|
# The ' in the onClick attributes are not escaped, it couldn't be parsed
|
||||||
# like: http://trailers.apple.com/trailers/wb/gravity/
|
# like: http://trailers.apple.com/trailers/wb/gravity/
|
||||||
|
|
||||||
@@ -96,6 +106,9 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
trailer_info_json = self._search_regex(self._JSON_RE,
|
trailer_info_json = self._search_regex(self._JSON_RE,
|
||||||
on_click, 'trailer info')
|
on_click, 'trailer info')
|
||||||
trailer_info = json.loads(trailer_info_json)
|
trailer_info = json.loads(trailer_info_json)
|
||||||
|
first_url = trailer_info.get('url')
|
||||||
|
if not first_url:
|
||||||
|
continue
|
||||||
title = trailer_info['title']
|
title = trailer_info['title']
|
||||||
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
|
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
|
||||||
thumbnail = li.find('.//img').attrib['src']
|
thumbnail = li.find('.//img').attrib['src']
|
||||||
@@ -107,7 +120,6 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
if m:
|
if m:
|
||||||
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
|
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
|
||||||
|
|
||||||
first_url = trailer_info['url']
|
|
||||||
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
|
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
|
||||||
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
|
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
|
||||||
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
|
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
|
||||||
@@ -144,3 +156,76 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
'id': movie,
|
'id': movie,
|
||||||
'entries': playlist,
|
'entries': playlist,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class AppleTrailersSectionIE(InfoExtractor):
|
||||||
|
IE_NAME = 'appletrailers:section'
|
||||||
|
_SECTIONS = {
|
||||||
|
'justadded': {
|
||||||
|
'feed_path': 'just_added',
|
||||||
|
'title': 'Just Added',
|
||||||
|
},
|
||||||
|
'exclusive': {
|
||||||
|
'feed_path': 'exclusive',
|
||||||
|
'title': 'Exclusive',
|
||||||
|
},
|
||||||
|
'justhd': {
|
||||||
|
'feed_path': 'just_hd',
|
||||||
|
'title': 'Just HD',
|
||||||
|
},
|
||||||
|
'mostpopular': {
|
||||||
|
'feed_path': 'most_pop',
|
||||||
|
'title': 'Most Popular',
|
||||||
|
},
|
||||||
|
'moviestudios': {
|
||||||
|
'feed_path': 'studios',
|
||||||
|
'title': 'Movie Studios',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>%s)' % '|'.join(_SECTIONS)
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://trailers.apple.com/#section=justadded',
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'Just Added',
|
||||||
|
'id': 'justadded',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 80,
|
||||||
|
}, {
|
||||||
|
'url': 'http://trailers.apple.com/#section=exclusive',
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'Exclusive',
|
||||||
|
'id': 'exclusive',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 80,
|
||||||
|
}, {
|
||||||
|
'url': 'http://trailers.apple.com/#section=justhd',
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'Just HD',
|
||||||
|
'id': 'justhd',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 80,
|
||||||
|
}, {
|
||||||
|
'url': 'http://trailers.apple.com/#section=mostpopular',
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'Most Popular',
|
||||||
|
'id': 'mostpopular',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 80,
|
||||||
|
}, {
|
||||||
|
'url': 'http://trailers.apple.com/#section=moviestudios',
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'Movie Studios',
|
||||||
|
'id': 'moviestudios',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 80,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
section = self._match_id(url)
|
||||||
|
section_data = self._download_json(
|
||||||
|
'http://trailers.apple.com/trailers/home/feeds/%s.json' % self._SECTIONS[section]['feed_path'],
|
||||||
|
section)
|
||||||
|
entries = [
|
||||||
|
self.url_result('http://trailers.apple.com' + e['location'])
|
||||||
|
for e in section_data]
|
||||||
|
return self.playlist_result(entries, section, self._SECTIONS[section]['title'])
|
||||||
|
|||||||
@@ -8,13 +8,14 @@ from .generic import GenericIE
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
get_element_by_attribute,
|
||||||
qualities,
|
qualities,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
parse_xml,
|
|
||||||
)
|
)
|
||||||
|
from ..compat import compat_etree_fromstring
|
||||||
|
|
||||||
|
|
||||||
class ARDMediathekIE(InfoExtractor):
|
class ARDMediathekIE(InfoExtractor):
|
||||||
@@ -22,19 +23,127 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
|
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '29582122',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Ich liebe das Leben trotzdem',
|
||||||
|
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
|
||||||
|
'duration': 4557,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
|
||||||
|
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '29522730',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
|
||||||
|
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
|
||||||
|
'duration': 5252,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# audio
|
||||||
|
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
|
||||||
|
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '28488308',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'title': 'Tod eines Fußballers',
|
||||||
|
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
|
||||||
|
'duration': 3240,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
|
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
|
||||||
'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '22490580',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Das Wunder von Wolbeck (Video tgl. ab 20 Uhr)',
|
|
||||||
'description': 'Auf einem restaurierten Hof bei Wolbeck wird der Heilpraktiker Raffael Lembeck eines morgens von seiner Frau Stella tot aufgefunden. Das Opfer war offensichtlich in seiner Praxis zu Fall gekommen und ist dann verblutet, erklärt Prof. Boerne am Tatort.',
|
|
||||||
},
|
|
||||||
'skip': 'Blocked outside of Germany',
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
def _extract_media_info(self, media_info_url, webpage, video_id):
|
||||||
|
media_info = self._download_json(
|
||||||
|
media_info_url, video_id, 'Downloading media JSON')
|
||||||
|
|
||||||
|
formats = self._extract_formats(media_info, video_id)
|
||||||
|
|
||||||
|
if not formats:
|
||||||
|
if '"fsk"' in webpage:
|
||||||
|
raise ExtractorError(
|
||||||
|
'This video is only available after 20:00', expected=True)
|
||||||
|
elif media_info.get('_geoblocked'):
|
||||||
|
raise ExtractorError('This video is not available due to geo restriction', expected=True)
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
duration = int_or_none(media_info.get('_duration'))
|
||||||
|
thumbnail = media_info.get('_previewImage')
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
subtitle_url = media_info.get('_subtitleUrl')
|
||||||
|
if subtitle_url:
|
||||||
|
subtitles['de'] = [{
|
||||||
|
'ext': 'ttml',
|
||||||
|
'url': subtitle_url,
|
||||||
|
}]
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'duration': duration,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _extract_formats(self, media_info, video_id):
|
||||||
|
type_ = media_info.get('_type')
|
||||||
|
media_array = media_info.get('_mediaArray', [])
|
||||||
|
formats = []
|
||||||
|
for num, media in enumerate(media_array):
|
||||||
|
for stream in media.get('_mediaStreamArray', []):
|
||||||
|
stream_urls = stream.get('_stream')
|
||||||
|
if not stream_urls:
|
||||||
|
continue
|
||||||
|
if not isinstance(stream_urls, list):
|
||||||
|
stream_urls = [stream_urls]
|
||||||
|
quality = stream.get('_quality')
|
||||||
|
server = stream.get('_server')
|
||||||
|
for stream_url in stream_urls:
|
||||||
|
ext = determine_ext(stream_url)
|
||||||
|
if quality != 'auto' and ext in ('f4m', 'm3u8'):
|
||||||
|
continue
|
||||||
|
if ext == 'f4m':
|
||||||
|
formats.extend(self._extract_f4m_formats(
|
||||||
|
stream_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
|
||||||
|
video_id, preference=-1, f4m_id='hds', fatal=False))
|
||||||
|
elif ext == 'm3u8':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
stream_url, video_id, 'mp4', preference=1, m3u8_id='hls', fatal=False))
|
||||||
|
else:
|
||||||
|
if server and server.startswith('rtmp'):
|
||||||
|
f = {
|
||||||
|
'url': server,
|
||||||
|
'play_path': stream_url,
|
||||||
|
'format_id': 'a%s-rtmp-%s' % (num, quality),
|
||||||
|
}
|
||||||
|
elif stream_url.startswith('http'):
|
||||||
|
f = {
|
||||||
|
'url': stream_url,
|
||||||
|
'format_id': 'a%s-%s-%s' % (num, ext, quality)
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
|
||||||
|
if m:
|
||||||
|
f.update({
|
||||||
|
'width': int(m.group('width')),
|
||||||
|
'height': int(m.group('height')),
|
||||||
|
})
|
||||||
|
if type_ == 'audio':
|
||||||
|
f['vcodec'] = 'none'
|
||||||
|
formats.append(f)
|
||||||
|
return formats
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
# determine video id from url
|
# determine video id from url
|
||||||
m = re.match(self._VALID_URL, url)
|
m = re.match(self._VALID_URL, url)
|
||||||
@@ -54,7 +163,7 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)
|
raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)
|
||||||
|
|
||||||
if re.search(r'[\?&]rss($|[=&])', url):
|
if re.search(r'[\?&]rss($|[=&])', url):
|
||||||
doc = parse_xml(webpage)
|
doc = compat_etree_fromstring(webpage.encode('utf-8'))
|
||||||
if doc.tag == 'rss':
|
if doc.tag == 'rss':
|
||||||
return GenericIE()._extract_rss(url, video_id, doc)
|
return GenericIE()._extract_rss(url, video_id, doc)
|
||||||
|
|
||||||
@@ -92,46 +201,22 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
'format_id': fid,
|
'format_id': fid,
|
||||||
'url': furl,
|
'url': furl,
|
||||||
})
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
info = {
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
else: # request JSON file
|
else: # request JSON file
|
||||||
media_info = self._download_json(
|
info = self._extract_media_info(
|
||||||
'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
|
'http://www.ardmediathek.de/play/media/%s' % video_id, webpage, video_id)
|
||||||
# The second element of the _mediaArray contains the standard http urls
|
|
||||||
streams = media_info['_mediaArray'][1]['_mediaStreamArray']
|
|
||||||
if not streams:
|
|
||||||
if '"fsk"' in webpage:
|
|
||||||
raise ExtractorError('This video is only available after 20:00')
|
|
||||||
|
|
||||||
formats = []
|
info.update({
|
||||||
for s in streams:
|
|
||||||
if type(s['_stream']) == list:
|
|
||||||
for index, url in enumerate(s['_stream'][::-1]):
|
|
||||||
quality = s['_quality'] + index
|
|
||||||
formats.append({
|
|
||||||
'quality': quality,
|
|
||||||
'url': url,
|
|
||||||
'format_id': '%s-%s' % (determine_ext(url), quality)
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
|
|
||||||
format = {
|
|
||||||
'quality': s['_quality'],
|
|
||||||
'url': s['_stream'],
|
|
||||||
}
|
|
||||||
|
|
||||||
format['format_id'] = '%s-%s' % (
|
|
||||||
determine_ext(format['url']), format['quality'])
|
|
||||||
|
|
||||||
formats.append(format)
|
|
||||||
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': description,
|
'description': description,
|
||||||
'formats': formats,
|
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
}
|
})
|
||||||
|
|
||||||
|
return info
|
||||||
|
|
||||||
|
|
||||||
class ARDIE(InfoExtractor):
|
class ARDIE(InfoExtractor):
|
||||||
@@ -189,3 +274,41 @@ class ARDIE(InfoExtractor):
|
|||||||
'upload_date': upload_date,
|
'upload_date': upload_date,
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class SportschauIE(ARDMediathekIE):
|
||||||
|
IE_NAME = 'Sportschau'
|
||||||
|
_VALID_URL = r'(?P<baseurl>https?://(?:www\.)?sportschau\.de/(?:[^/]+/)+video(?P<id>[^/#?]+))\.html'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.sportschau.de/tourdefrance/videoseppeltkokainhatnichtsmitklassischemdopingzutun100.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'seppeltkokainhatnichtsmitklassischemdopingzutun100',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Seppelt: "Kokain hat nichts mit klassischem Doping zu tun"',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'description': 'Der ARD-Doping Experte Hajo Seppelt gibt seine Einschätzung zum ersten Dopingfall der diesjährigen Tour de France um den Italiener Luca Paolini ab.',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
base_url = mobj.group('baseurl')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
title = get_element_by_attribute('class', 'headline', webpage)
|
||||||
|
description = self._html_search_meta('description', webpage, 'description')
|
||||||
|
|
||||||
|
info = self._extract_media_info(
|
||||||
|
base_url + '-mc_defaultQuality-h.json', webpage, video_id)
|
||||||
|
|
||||||
|
info.update({
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
})
|
||||||
|
|
||||||
|
return info
|
||||||
|
|||||||
@@ -4,11 +4,16 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import (
|
||||||
|
compat_parse_qs,
|
||||||
|
compat_urllib_parse_urlparse,
|
||||||
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
find_xpath_attr,
|
find_xpath_attr,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
get_element_by_attribute,
|
get_element_by_attribute,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
NO_DEFAULT,
|
||||||
qualities,
|
qualities,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -18,7 +23,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class ArteTvIE(InfoExtractor):
|
class ArteTvIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
|
_VALID_URL = r'https?://videos\.arte\.tv/(?P<lang>fr|de|en|es)/.*-(?P<id>.*?)\.html'
|
||||||
IE_NAME = 'arte.tv'
|
IE_NAME = 'arte.tv'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@@ -58,15 +63,19 @@ class ArteTvIE(InfoExtractor):
|
|||||||
|
|
||||||
class ArteTVPlus7IE(InfoExtractor):
|
class ArteTVPlus7IE(InfoExtractor):
|
||||||
IE_NAME = 'arte.tv:+7'
|
IE_NAME = 'arte.tv:+7'
|
||||||
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
|
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/(?:(?:sendungen|emissions|embed)/)?(?P<id>[^/]+)/(?P<name>[^/?#&]+)'
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _extract_url_info(cls, url):
|
def _extract_url_info(cls, url):
|
||||||
mobj = re.match(cls._VALID_URL, url)
|
mobj = re.match(cls._VALID_URL, url)
|
||||||
lang = mobj.group('lang')
|
lang = mobj.group('lang')
|
||||||
# This is not a real id, it can be for example AJT for the news
|
query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
|
||||||
# http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
|
if 'vid' in query:
|
||||||
video_id = mobj.group('id')
|
video_id = query['vid'][0]
|
||||||
|
else:
|
||||||
|
# This is not a real id, it can be for example AJT for the news
|
||||||
|
# http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
|
||||||
|
video_id = mobj.group('id')
|
||||||
return video_id, lang
|
return video_id, lang
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@@ -75,20 +84,63 @@ class ArteTVPlus7IE(InfoExtractor):
|
|||||||
return self._extract_from_webpage(webpage, video_id, lang)
|
return self._extract_from_webpage(webpage, video_id, lang)
|
||||||
|
|
||||||
def _extract_from_webpage(self, webpage, video_id, lang):
|
def _extract_from_webpage(self, webpage, video_id, lang):
|
||||||
|
patterns_templates = (r'arte_vp_url=["\'](.*?%s.*?)["\']', r'data-url=["\']([^"]+%s[^"]+)["\']')
|
||||||
|
ids = (video_id, '')
|
||||||
|
# some pages contain multiple videos (like
|
||||||
|
# http://www.arte.tv/guide/de/sendungen/XEN/xenius/?vid=055918-015_PLUS7-D),
|
||||||
|
# so we first try to look for json URLs that contain the video id from
|
||||||
|
# the 'vid' parameter.
|
||||||
|
patterns = [t % re.escape(_id) for _id in ids for t in patterns_templates]
|
||||||
json_url = self._html_search_regex(
|
json_url = self._html_search_regex(
|
||||||
[r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
|
patterns, webpage, 'json vp url', default=None)
|
||||||
webpage, 'json vp url')
|
if not json_url:
|
||||||
return self._extract_from_json_url(json_url, video_id, lang)
|
def find_iframe_url(webpage, default=NO_DEFAULT):
|
||||||
|
return self._html_search_regex(
|
||||||
|
r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
|
||||||
|
webpage, 'iframe url', group='url', default=default)
|
||||||
|
|
||||||
def _extract_from_json_url(self, json_url, video_id, lang):
|
iframe_url = find_iframe_url(webpage, None)
|
||||||
|
if not iframe_url:
|
||||||
|
embed_url = self._html_search_regex(
|
||||||
|
r'arte_vp_url_oembed=\'([^\']+?)\'', webpage, 'embed url', default=None)
|
||||||
|
if embed_url:
|
||||||
|
player = self._download_json(
|
||||||
|
embed_url, video_id, 'Downloading player page')
|
||||||
|
iframe_url = find_iframe_url(player['html'])
|
||||||
|
# en and es URLs produce react-based pages with different layout (e.g.
|
||||||
|
# http://www.arte.tv/guide/en/053330-002-A/carnival-italy?zone=world)
|
||||||
|
if not iframe_url:
|
||||||
|
program = self._search_regex(
|
||||||
|
r'program\s*:\s*({.+?["\']embed_html["\'].+?}),?\s*\n',
|
||||||
|
webpage, 'program', default=None)
|
||||||
|
if program:
|
||||||
|
embed_html = self._parse_json(program, video_id)
|
||||||
|
if embed_html:
|
||||||
|
iframe_url = find_iframe_url(embed_html['embed_html'])
|
||||||
|
if iframe_url:
|
||||||
|
json_url = compat_parse_qs(
|
||||||
|
compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
|
||||||
|
if json_url:
|
||||||
|
title = self._search_regex(
|
||||||
|
r'<h3[^>]+title=(["\'])(?P<title>.+?)\1',
|
||||||
|
webpage, 'title', default=None, group='title')
|
||||||
|
return self._extract_from_json_url(json_url, video_id, lang, title=title)
|
||||||
|
# Different kind of embed URL (e.g.
|
||||||
|
# http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium)
|
||||||
|
embed_url = self._search_regex(
|
||||||
|
r'<iframe[^>]+src=(["\'])(?P<url>.+?)\1',
|
||||||
|
webpage, 'embed url', group='url')
|
||||||
|
return self.url_result(embed_url)
|
||||||
|
|
||||||
|
def _extract_from_json_url(self, json_url, video_id, lang, title=None):
|
||||||
info = self._download_json(json_url, video_id)
|
info = self._download_json(json_url, video_id)
|
||||||
player_info = info['videoJsonPlayer']
|
player_info = info['videoJsonPlayer']
|
||||||
|
|
||||||
upload_date_str = player_info.get('shootingDate')
|
upload_date_str = player_info.get('shootingDate')
|
||||||
if not upload_date_str:
|
if not upload_date_str:
|
||||||
upload_date_str = player_info.get('VDA', '').split(' ')[0]
|
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
|
||||||
|
|
||||||
title = player_info['VTI'].strip()
|
title = (player_info.get('VTI') or title or player_info['VID']).strip()
|
||||||
subtitle = player_info.get('VSU', '').strip()
|
subtitle = player_info.get('VSU', '').strip()
|
||||||
if subtitle:
|
if subtitle:
|
||||||
title += ' - %s' % subtitle
|
title += ' - %s' % subtitle
|
||||||
@@ -102,28 +154,60 @@ class ArteTVPlus7IE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
|
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
|
||||||
|
|
||||||
|
LANGS = {
|
||||||
|
'fr': 'F',
|
||||||
|
'de': 'A',
|
||||||
|
'en': 'E[ANG]',
|
||||||
|
'es': 'E[ESP]',
|
||||||
|
}
|
||||||
|
|
||||||
|
langcode = LANGS.get(lang, lang)
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for format_id, format_dict in player_info['VSR'].items():
|
for format_id, format_dict in player_info['VSR'].items():
|
||||||
f = dict(format_dict)
|
f = dict(format_dict)
|
||||||
versionCode = f.get('versionCode')
|
versionCode = f.get('versionCode')
|
||||||
|
l = re.escape(langcode)
|
||||||
|
|
||||||
|
# Language preference from most to least priority
|
||||||
|
# Reference: section 5.6.3 of
|
||||||
|
# http://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-05.pdf
|
||||||
|
PREFERENCES = (
|
||||||
|
# original version in requested language, without subtitles
|
||||||
|
r'VO{0}$'.format(l),
|
||||||
|
# original version in requested language, with partial subtitles in requested language
|
||||||
|
r'VO{0}-ST{0}$'.format(l),
|
||||||
|
# original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
|
||||||
|
r'VO{0}-STM{0}$'.format(l),
|
||||||
|
# non-original (dubbed) version in requested language, without subtitles
|
||||||
|
r'V{0}$'.format(l),
|
||||||
|
# non-original (dubbed) version in requested language, with subtitles partial subtitles in requested language
|
||||||
|
r'V{0}-ST{0}$'.format(l),
|
||||||
|
# non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
|
||||||
|
r'V{0}-STM{0}$'.format(l),
|
||||||
|
# original version in requested language, with partial subtitles in different language
|
||||||
|
r'VO{0}-ST(?!{0}).+?$'.format(l),
|
||||||
|
# original version in requested language, with subtitles for the deaf and hard-of-hearing in different language
|
||||||
|
r'VO{0}-STM(?!{0}).+?$'.format(l),
|
||||||
|
# original version in different language, with partial subtitles in requested language
|
||||||
|
r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l),
|
||||||
|
# original version in different language, with subtitles for the deaf and hard-of-hearing in requested language
|
||||||
|
r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l),
|
||||||
|
# original version in different language, without subtitles
|
||||||
|
r'VO(?:(?!{0}))?$'.format(l),
|
||||||
|
# original version in different language, with partial subtitles in different language
|
||||||
|
r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l),
|
||||||
|
# original version in different language, with subtitles for the deaf and hard-of-hearing in different language
|
||||||
|
r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l),
|
||||||
|
)
|
||||||
|
|
||||||
|
for pref, p in enumerate(PREFERENCES):
|
||||||
|
if re.match(p, versionCode):
|
||||||
|
lang_pref = len(PREFERENCES) - pref
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
lang_pref = -1
|
||||||
|
|
||||||
langcode = {
|
|
||||||
'fr': 'F',
|
|
||||||
'de': 'A',
|
|
||||||
}.get(lang, lang)
|
|
||||||
lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
|
|
||||||
lang_pref = (
|
|
||||||
None if versionCode is None else (
|
|
||||||
10 if any(re.match(r, versionCode) for r in lang_rexs)
|
|
||||||
else -10))
|
|
||||||
source_pref = 0
|
|
||||||
if versionCode is not None:
|
|
||||||
# The original version with subtitles has lower relevance
|
|
||||||
if re.match(r'VO-ST(F|A)', versionCode):
|
|
||||||
source_pref -= 10
|
|
||||||
# The version with sourds/mal subtitles has also lower relevance
|
|
||||||
elif re.match(r'VO?(F|A)-STM\1', versionCode):
|
|
||||||
source_pref -= 9
|
|
||||||
format = {
|
format = {
|
||||||
'format_id': format_id,
|
'format_id': format_id,
|
||||||
'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
|
'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
|
||||||
@@ -133,7 +217,6 @@ class ArteTVPlus7IE(InfoExtractor):
|
|||||||
'height': int_or_none(f.get('height')),
|
'height': int_or_none(f.get('height')),
|
||||||
'tbr': int_or_none(f.get('bitrate')),
|
'tbr': int_or_none(f.get('bitrate')),
|
||||||
'quality': qfunc(f.get('quality')),
|
'quality': qfunc(f.get('quality')),
|
||||||
'source_preference': source_pref,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.get('mediaType') == 'rtmp':
|
if f.get('mediaType') == 'rtmp':
|
||||||
@@ -155,7 +238,7 @@ class ArteTVPlus7IE(InfoExtractor):
|
|||||||
# It also uses the arte_vp_url url from the webpage to extract the information
|
# It also uses the arte_vp_url url from the webpage to extract the information
|
||||||
class ArteTVCreativeIE(ArteTVPlus7IE):
|
class ArteTVCreativeIE(ArteTVPlus7IE):
|
||||||
IE_NAME = 'arte.tv:creative'
|
IE_NAME = 'arte.tv:creative'
|
||||||
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
|
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de|en|es)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
|
'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
|
||||||
@@ -174,35 +257,48 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
|
|||||||
'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
|
'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
|
||||||
'upload_date': '20140805',
|
'upload_date': '20140805',
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://creative.arte.tv/de/episode/agentur-amateur-4-der-erste-kunde',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
||||||
|
class ArteTVInfoIE(ArteTVPlus7IE):
|
||||||
|
IE_NAME = 'arte.tv:info'
|
||||||
|
_VALID_URL = r'https?://info\.arte\.tv/(?P<lang>fr|de|en|es)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://info.arte.tv/fr/service-civique-un-cache-misere',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '067528-000-A',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Service civique, un cache misère ?',
|
||||||
|
'upload_date': '20160403',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class ArteTVFutureIE(ArteTVPlus7IE):
|
class ArteTVFutureIE(ArteTVPlus7IE):
|
||||||
IE_NAME = 'arte.tv:future'
|
IE_NAME = 'arte.tv:future'
|
||||||
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
|
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
|
'url': 'http://future.arte.tv/fr/info-sciences/les-ecrevisses-aussi-sont-anxieuses',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5201',
|
'id': '050940-028-A',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Les champignons au secours de la planète',
|
'title': 'Les écrevisses aussi peuvent être anxieuses',
|
||||||
'upload_date': '20131101',
|
'upload_date': '20140902',
|
||||||
},
|
},
|
||||||
}
|
}, {
|
||||||
|
'url': 'http://future.arte.tv/fr/la-science-est-elle-responsable',
|
||||||
def _real_extract(self, url):
|
'only_matching': True,
|
||||||
anchor_id, lang = self._extract_url_info(url)
|
}]
|
||||||
webpage = self._download_webpage(url, anchor_id)
|
|
||||||
row = self._search_regex(
|
|
||||||
r'(?s)id="%s"[^>]*>.+?(<div[^>]*arte_vp_url[^>]*>)' % anchor_id,
|
|
||||||
webpage, 'row')
|
|
||||||
return self._extract_from_webpage(row, anchor_id, lang)
|
|
||||||
|
|
||||||
|
|
||||||
class ArteTVDDCIE(ArteTVPlus7IE):
|
class ArteTVDDCIE(ArteTVPlus7IE):
|
||||||
IE_NAME = 'arte.tv:ddc'
|
IE_NAME = 'arte.tv:ddc'
|
||||||
_VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
|
_VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id, lang = self._extract_url_info(url)
|
video_id, lang = self._extract_url_info(url)
|
||||||
@@ -220,7 +316,7 @@ class ArteTVDDCIE(ArteTVPlus7IE):
|
|||||||
|
|
||||||
class ArteTVConcertIE(ArteTVPlus7IE):
|
class ArteTVConcertIE(ArteTVPlus7IE):
|
||||||
IE_NAME = 'arte.tv:concert'
|
IE_NAME = 'arte.tv:concert'
|
||||||
_VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
|
_VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
|
'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
|
||||||
@@ -235,11 +331,59 @@ class ArteTVConcertIE(ArteTVPlus7IE):
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class ArteTVCinemaIE(ArteTVPlus7IE):
|
||||||
|
IE_NAME = 'arte.tv:cinema'
|
||||||
|
_VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>.+)'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://cinema.arte.tv/de/node/38291',
|
||||||
|
'md5': '6b275511a5107c60bacbeeda368c3aa1',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '055876-000_PWA12025-D',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Tod auf dem Nil',
|
||||||
|
'upload_date': '20160122',
|
||||||
|
'description': 'md5:7f749bbb77d800ef2be11d54529b96bc',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class ArteTVMagazineIE(ArteTVPlus7IE):
|
||||||
|
IE_NAME = 'arte.tv:magazine'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?arte\.tv/magazine/[^/]+/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
# Embedded via <iframe src="http://www.arte.tv/arte_vp/index.php?json_url=..."
|
||||||
|
'url': 'http://www.arte.tv/magazine/trepalium/fr/entretien-avec-le-realisateur-vincent-lannoo-trepalium',
|
||||||
|
'md5': '2a9369bcccf847d1c741e51416299f25',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '065965-000-A',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Trepalium - Extrait Ep.01',
|
||||||
|
'upload_date': '20160121',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# Embedded via <iframe src="http://www.arte.tv/guide/fr/embed/054813-004-A/medium"
|
||||||
|
'url': 'http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium',
|
||||||
|
'md5': 'fedc64fc7a946110fe311634e79782ca',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '054813-004_PLUS7-F',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Trepalium (4/6)',
|
||||||
|
'description': 'md5:10057003c34d54e95350be4f9b05cb40',
|
||||||
|
'upload_date': '20160218',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.arte.tv/magazine/metropolis/de/frank-woeste-german-paris-metropolis',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
|
||||||
class ArteTVEmbedIE(ArteTVPlus7IE):
|
class ArteTVEmbedIE(ArteTVPlus7IE):
|
||||||
IE_NAME = 'arte.tv:embed'
|
IE_NAME = 'arte.tv:embed'
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
http://www\.arte\.tv
|
http://www\.arte\.tv
|
||||||
/playerv2/embed\.php\?json_url=
|
/(?:playerv2/embed|arte_vp/index)\.php\?json_url=
|
||||||
(?P<json_url>
|
(?P<json_url>
|
||||||
http://arte\.tv/papi/tvguide/videos/stream/player/
|
http://arte\.tv/papi/tvguide/videos/stream/player/
|
||||||
(?P<lang>[^/]+)/(?P<id>[^/]+)[^&]*
|
(?P<lang>[^/]+)/(?P<id>[^/]+)[^&]*
|
||||||
|
|||||||
@@ -2,18 +2,18 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
import time
|
import time
|
||||||
import hmac
|
import hmac
|
||||||
|
import hashlib
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import compat_str
|
||||||
compat_str,
|
|
||||||
compat_urllib_parse,
|
|
||||||
compat_urllib_request,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
|
||||||
float_or_none,
|
|
||||||
xpath_text,
|
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
|
sanitized_Request,
|
||||||
|
urlencode_postdata,
|
||||||
|
xpath_text,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -32,6 +32,19 @@ class AtresPlayerIE(InfoExtractor):
|
|||||||
'duration': 5527.6,
|
'duration': 5527.6,
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
},
|
},
|
||||||
|
'skip': 'This video is only available for registered users'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html',
|
||||||
|
'md5': '0d0e918533bbd4b263f2de4d197d4aac',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'capitulo-112-david-bustamante',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'David Bustamante',
|
||||||
|
'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6',
|
||||||
|
'duration': 1439.0,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
|
'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
|
||||||
@@ -50,6 +63,13 @@ class AtresPlayerIE(InfoExtractor):
|
|||||||
|
|
||||||
_LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
|
_LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
|
||||||
|
|
||||||
|
_ERRORS = {
|
||||||
|
'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.',
|
||||||
|
'DELETED': 'This video has expired and is no longer available for online streaming.',
|
||||||
|
'GEOUNPUBLISHED': 'We\'re sorry, but this video is not available in your region due to right restrictions.',
|
||||||
|
# 'PREMIUM': 'PREMIUM',
|
||||||
|
}
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
self._login()
|
self._login()
|
||||||
|
|
||||||
@@ -63,8 +83,8 @@ class AtresPlayerIE(InfoExtractor):
|
|||||||
'j_password': password,
|
'j_password': password,
|
||||||
}
|
}
|
||||||
|
|
||||||
request = compat_urllib_request.Request(
|
request = sanitized_Request(
|
||||||
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
|
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||||
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||||
response = self._download_webpage(
|
response = self._download_webpage(
|
||||||
request, None, 'Logging in as %s' % username)
|
request, None, 'Logging in as %s' % username)
|
||||||
@@ -83,58 +103,72 @@ class AtresPlayerIE(InfoExtractor):
|
|||||||
episode_id = self._search_regex(
|
episode_id = self._search_regex(
|
||||||
r'episode="([^"]+)"', webpage, 'episode id')
|
r'episode="([^"]+)"', webpage, 'episode id')
|
||||||
|
|
||||||
|
request = sanitized_Request(
|
||||||
|
self._PLAYER_URL_TEMPLATE % episode_id,
|
||||||
|
headers={'User-Agent': self._USER_AGENT})
|
||||||
|
player = self._download_json(request, episode_id, 'Downloading player JSON')
|
||||||
|
|
||||||
|
episode_type = player.get('typeOfEpisode')
|
||||||
|
error_message = self._ERRORS.get(episode_type)
|
||||||
|
if error_message:
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
video_url = player.get('urlVideo')
|
||||||
|
if video_url:
|
||||||
|
format_info = {
|
||||||
|
'url': video_url,
|
||||||
|
'format_id': 'http',
|
||||||
|
}
|
||||||
|
mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url)
|
||||||
|
if mobj:
|
||||||
|
format_info.update({
|
||||||
|
'width': int_or_none(mobj.group('width')),
|
||||||
|
'height': int_or_none(mobj.group('height')),
|
||||||
|
'tbr': int_or_none(mobj.group('bitrate')),
|
||||||
|
})
|
||||||
|
formats.append(format_info)
|
||||||
|
|
||||||
timestamp = int_or_none(self._download_webpage(
|
timestamp = int_or_none(self._download_webpage(
|
||||||
self._TIME_API_URL,
|
self._TIME_API_URL,
|
||||||
video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
|
video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
|
||||||
timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
|
timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
|
||||||
token = hmac.new(
|
token = hmac.new(
|
||||||
self._MAGIC.encode('ascii'),
|
self._MAGIC.encode('ascii'),
|
||||||
(episode_id + timestamp_shifted).encode('utf-8')
|
(episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5
|
||||||
).hexdigest()
|
).hexdigest()
|
||||||
|
|
||||||
formats = []
|
request = sanitized_Request(
|
||||||
for fmt in ['windows', 'android_tablet']:
|
self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token),
|
||||||
request = compat_urllib_request.Request(
|
headers={'User-Agent': self._USER_AGENT})
|
||||||
self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token))
|
|
||||||
request.add_header('User-Agent', self._USER_AGENT)
|
|
||||||
|
|
||||||
fmt_json = self._download_json(
|
fmt_json = self._download_json(
|
||||||
request, video_id, 'Downloading %s video JSON' % fmt)
|
request, video_id, 'Downloading windows video JSON')
|
||||||
|
|
||||||
result = fmt_json.get('resultDes')
|
result = fmt_json.get('resultDes')
|
||||||
if result.lower() != 'ok':
|
if result.lower() != 'ok':
|
||||||
raise ExtractorError(
|
raise ExtractorError(
|
||||||
'%s returned error: %s' % (self.IE_NAME, result), expected=True)
|
'%s returned error: %s' % (self.IE_NAME, result), expected=True)
|
||||||
|
|
||||||
for format_id, video_url in fmt_json['resultObject'].items():
|
for format_id, video_url in fmt_json['resultObject'].items():
|
||||||
if format_id == 'token' or not video_url.startswith('http'):
|
if format_id == 'token' or not video_url.startswith('http'):
|
||||||
continue
|
continue
|
||||||
if video_url.endswith('/Manifest'):
|
if 'geodeswowsmpra3player' in video_url:
|
||||||
if 'geodeswowsmpra3player' in video_url:
|
f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
|
||||||
f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
|
f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
|
||||||
f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
|
# this videos are protected by DRM, the f4m downloader doesn't support them
|
||||||
# this videos are protected by DRM, the f4m downloader doesn't support them
|
continue
|
||||||
continue
|
else:
|
||||||
else:
|
f4m_url = video_url[:-9] + '/manifest.f4m'
|
||||||
f4m_url = video_url[:-9] + '/manifest.f4m'
|
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
|
||||||
formats.extend(self._extract_f4m_formats(f4m_url, video_id))
|
|
||||||
else:
|
|
||||||
formats.append({
|
|
||||||
'url': video_url,
|
|
||||||
'format_id': 'android-%s' % format_id,
|
|
||||||
'preference': 1,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
player = self._download_json(
|
|
||||||
self._PLAYER_URL_TEMPLATE % episode_id,
|
|
||||||
episode_id)
|
|
||||||
|
|
||||||
path_data = player.get('pathData')
|
path_data = player.get('pathData')
|
||||||
|
|
||||||
episode = self._download_xml(
|
episode = self._download_xml(
|
||||||
self._EPISODE_URL_TEMPLATE % path_data,
|
self._EPISODE_URL_TEMPLATE % path_data, video_id,
|
||||||
video_id, 'Downloading episode XML')
|
'Downloading episode XML')
|
||||||
|
|
||||||
duration = float_or_none(xpath_text(
|
duration = float_or_none(xpath_text(
|
||||||
episode, './media/asset/info/technical/contentDuration', 'duration'))
|
episode, './media/asset/info/technical/contentDuration', 'duration'))
|
||||||
|
|||||||
89
youtube_dl/extractor/audimedia.py
Normal file
89
youtube_dl/extractor/audimedia.py
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
sanitized_Request,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AudiMediaIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?P<id>[^/?#]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
|
||||||
|
'md5': '79a8b71c46d49042609795ab59779b66',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1565',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test',
|
||||||
|
'description': 'md5:60e5d30a78ced725f7b8d34370762941',
|
||||||
|
'upload_date': '20151124',
|
||||||
|
'timestamp': 1448354940,
|
||||||
|
'duration': 74022,
|
||||||
|
'view_count': int,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
# extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken)
|
||||||
|
_AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2'
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
raw_payload = self._search_regex([
|
||||||
|
r'class="amtv-embed"[^>]+id="([^"]+)"',
|
||||||
|
r'class=\\"amtv-embed\\"[^>]+id=\\"([^"]+)\\"',
|
||||||
|
], webpage, 'raw payload')
|
||||||
|
_, stage_mode, video_id, lang = raw_payload.split('-')
|
||||||
|
|
||||||
|
# TODO: handle s and e stage_mode (live streams and ended live streams)
|
||||||
|
if stage_mode not in ('s', 'e'):
|
||||||
|
request = sanitized_Request(
|
||||||
|
'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang),
|
||||||
|
headers={'X-Auth-Token': self._AUTH_TOKEN})
|
||||||
|
json_data = self._download_json(request, video_id)['results']
|
||||||
|
formats = []
|
||||||
|
|
||||||
|
stream_url_hls = json_data.get('stream_url_hls')
|
||||||
|
if stream_url_hls:
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
stream_url_hls, video_id, 'mp4',
|
||||||
|
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
|
||||||
|
|
||||||
|
stream_url_hds = json_data.get('stream_url_hds')
|
||||||
|
if stream_url_hds:
|
||||||
|
formats.extend(self._extract_f4m_formats(
|
||||||
|
stream_url_hds + '?hdcore=3.4.0',
|
||||||
|
video_id, f4m_id='hds', fatal=False))
|
||||||
|
|
||||||
|
for video_version in json_data.get('video_versions'):
|
||||||
|
video_version_url = video_version.get('download_url') or video_version.get('stream_url')
|
||||||
|
if not video_version_url:
|
||||||
|
continue
|
||||||
|
f = {
|
||||||
|
'url': video_version_url,
|
||||||
|
'width': int_or_none(video_version.get('width')),
|
||||||
|
'height': int_or_none(video_version.get('height')),
|
||||||
|
'abr': int_or_none(video_version.get('audio_bitrate')),
|
||||||
|
'vbr': int_or_none(video_version.get('video_bitrate')),
|
||||||
|
}
|
||||||
|
bitrate = self._search_regex(r'(\d+)k', video_version_url, 'bitrate', default=None)
|
||||||
|
if bitrate:
|
||||||
|
f.update({
|
||||||
|
'format_id': 'http-%s' % bitrate,
|
||||||
|
})
|
||||||
|
formats.append(f)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': json_data['title'],
|
||||||
|
'description': json_data.get('subtitle'),
|
||||||
|
'thumbnail': json_data.get('thumbnail_image', {}).get('file'),
|
||||||
|
'timestamp': parse_iso8601(json_data.get('publication_date')),
|
||||||
|
'duration': int_or_none(json_data.get('duration')),
|
||||||
|
'view_count': int_or_none(json_data.get('view_count')),
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
66
youtube_dl/extractor/audioboom.py
Normal file
66
youtube_dl/extractor/audioboom.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import float_or_none
|
||||||
|
|
||||||
|
|
||||||
|
class AudioBoomIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?audioboom\.com/boos/(?P<id>[0-9]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://audioboom.com/boos/4279833-3-09-2016-czaban-hour-3?t=0',
|
||||||
|
'md5': '63a8d73a055c6ed0f1e51921a10a5a76',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '4279833',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'title': '3/09/2016 Czaban Hour 3',
|
||||||
|
'description': 'Guest: Nate Davis - NFL free agency, Guest: Stan Gans',
|
||||||
|
'duration': 2245.72,
|
||||||
|
'uploader': 'Steve Czaban',
|
||||||
|
'uploader_url': 're:https?://(?:www\.)?audioboom\.com/channel/steveczabanyahoosportsradio',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
clip = None
|
||||||
|
|
||||||
|
clip_store = self._parse_json(
|
||||||
|
self._search_regex(
|
||||||
|
r'data-new-clip-store=(["\'])(?P<json>{.*?"clipId"\s*:\s*%s.*?})\1' % video_id,
|
||||||
|
webpage, 'clip store', default='{}', group='json'),
|
||||||
|
video_id, fatal=False)
|
||||||
|
if clip_store:
|
||||||
|
clips = clip_store.get('clips')
|
||||||
|
if clips and isinstance(clips, list) and isinstance(clips[0], dict):
|
||||||
|
clip = clips[0]
|
||||||
|
|
||||||
|
def from_clip(field):
|
||||||
|
if clip:
|
||||||
|
clip.get(field)
|
||||||
|
|
||||||
|
audio_url = from_clip('clipURLPriorToLoading') or self._og_search_property(
|
||||||
|
'audio', webpage, 'audio url')
|
||||||
|
title = from_clip('title') or self._og_search_title(webpage)
|
||||||
|
description = from_clip('description') or self._og_search_description(webpage)
|
||||||
|
|
||||||
|
duration = float_or_none(from_clip('duration') or self._html_search_meta(
|
||||||
|
'weibo:audio:duration', webpage))
|
||||||
|
|
||||||
|
uploader = from_clip('author') or self._og_search_property(
|
||||||
|
'audio:artist', webpage, 'uploader', fatal=False)
|
||||||
|
uploader_url = from_clip('author_url') or self._html_search_meta(
|
||||||
|
'audioboo:channel', webpage, 'uploader url')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'url': audio_url,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'duration': duration,
|
||||||
|
'uploader': uploader,
|
||||||
|
'uploader_url': uploader_url,
|
||||||
|
}
|
||||||
@@ -30,14 +30,14 @@ class AudiomackIE(InfoExtractor):
|
|||||||
# audiomack wrapper around soundcloud song
|
# audiomack wrapper around soundcloud song
|
||||||
{
|
{
|
||||||
'add_ie': ['Soundcloud'],
|
'add_ie': ['Soundcloud'],
|
||||||
'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
|
'url': 'http://www.audiomack.com/song/hip-hop-daily/black-mamba-freestyle',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '172419696',
|
'id': '258901379',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'description': 'md5:1fc3272ed7a635cce5be1568c2822997',
|
'description': 'mamba day freestyle for the legend Kobe Bryant ',
|
||||||
'title': 'Young Thug ft Lil Wayne - Take Kare',
|
'title': 'Black Mamba Freestyle [Prod. By Danny Wolf]',
|
||||||
'uploader': 'Young Thug World',
|
'uploader': 'ILOVEMAKONNEN',
|
||||||
'upload_date': '20141016',
|
'upload_date': '20160414',
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
@@ -56,7 +56,7 @@ class AudiomackIE(InfoExtractor):
|
|||||||
|
|
||||||
# API is inconsistent with errors
|
# API is inconsistent with errors
|
||||||
if 'url' not in api_response or not api_response['url'] or 'error' in api_response:
|
if 'url' not in api_response or not api_response['url'] or 'error' in api_response:
|
||||||
raise ExtractorError('Invalid url %s', url)
|
raise ExtractorError('Invalid url %s' % url)
|
||||||
|
|
||||||
# Audiomack wraps a lot of soundcloud tracks in their branded wrapper
|
# Audiomack wraps a lot of soundcloud tracks in their branded wrapper
|
||||||
# if so, pass the work off to the soundcloud extractor
|
# if so, pass the work off to the soundcloud extractor
|
||||||
|
|||||||
@@ -3,7 +3,11 @@ from __future__ import unicode_literals
|
|||||||
import json
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import float_or_none
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
float_or_none,
|
||||||
|
sanitized_Request,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class AzubuIE(InfoExtractor):
|
class AzubuIE(InfoExtractor):
|
||||||
@@ -91,3 +95,38 @@ class AzubuIE(InfoExtractor):
|
|||||||
'view_count': view_count,
|
'view_count': view_count,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class AzubuLiveIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://www.azubu.tv/(?P<id>[^/]+)$'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.azubu.tv/MarsTVMDLen',
|
||||||
|
'only_matching': True,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
user = self._match_id(url)
|
||||||
|
|
||||||
|
info = self._download_json(
|
||||||
|
'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(user),
|
||||||
|
user)['data']
|
||||||
|
if info['type'] != 'STREAM':
|
||||||
|
raise ExtractorError('{0} is not streaming live'.format(user), expected=True)
|
||||||
|
|
||||||
|
req = sanitized_Request(
|
||||||
|
'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id'])
|
||||||
|
req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV')
|
||||||
|
bc_info = self._download_json(req, user)
|
||||||
|
m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS')
|
||||||
|
formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4')
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': info['id'],
|
||||||
|
'title': self._live_title(info['title']),
|
||||||
|
'uploader_id': user,
|
||||||
|
'formats': formats,
|
||||||
|
'is_live': True,
|
||||||
|
'thumbnail': bc_info['poster'],
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,17 +4,18 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urlparse
|
from ..utils import unescapeHTML
|
||||||
|
|
||||||
|
|
||||||
class BaiduVideoIE(InfoExtractor):
|
class BaiduVideoIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm'
|
IE_DESC = '百度视频'
|
||||||
|
_VALID_URL = r'https?://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6',
|
'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1069',
|
'id': '1069',
|
||||||
'title': '中华小当家 TV版 (全52集)',
|
'title': '中华小当家 TV版国语',
|
||||||
'description': 'md5:395a419e41215e531c857bb037bbaf80',
|
'description': 'md5:51be07afe461cf99fa61231421b5397c',
|
||||||
},
|
},
|
||||||
'playlist_count': 52,
|
'playlist_count': 52,
|
||||||
}, {
|
}, {
|
||||||
@@ -24,45 +25,32 @@ class BaiduVideoIE(InfoExtractor):
|
|||||||
'title': 're:^奔跑吧兄弟',
|
'title': 're:^奔跑吧兄弟',
|
||||||
'description': 'md5:1bf88bad6d850930f542d51547c089b8',
|
'description': 'md5:1bf88bad6d850930f542d51547c089b8',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 3,
|
'playlist_mincount': 12,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
def _call_api(self, path, category, playlist_id, note):
|
||||||
|
return self._download_json('http://app.video.baidu.com/%s/?worktype=adnative%s&id=%s' % (
|
||||||
|
path, category, playlist_id), playlist_id, note)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
category, playlist_id = re.match(self._VALID_URL, url).groups()
|
||||||
playlist_id = mobj.group('id')
|
|
||||||
category = category2 = mobj.group('type')
|
|
||||||
if category == 'show':
|
if category == 'show':
|
||||||
category2 = 'tvshow'
|
category = 'tvshow'
|
||||||
|
if category == 'tv':
|
||||||
|
category = 'tvplay'
|
||||||
|
|
||||||
webpage = self._download_webpage(url, playlist_id)
|
playlist_detail = self._call_api(
|
||||||
|
'xqinfo', category, playlist_id, 'Download playlist JSON metadata')
|
||||||
|
|
||||||
playlist_title = self._html_search_regex(
|
playlist_title = playlist_detail['title']
|
||||||
r'title\s*:\s*(["\'])(?P<title>[^\']+)\1', webpage,
|
playlist_description = unescapeHTML(playlist_detail.get('intro'))
|
||||||
'playlist title', group='title')
|
|
||||||
playlist_description = self._html_search_regex(
|
|
||||||
r'<input[^>]+class="j-data-intro"[^>]+value="([^"]+)"/>', webpage,
|
|
||||||
playlist_id, 'playlist description')
|
|
||||||
|
|
||||||
site = self._html_search_regex(
|
episodes_detail = self._call_api(
|
||||||
r'filterSite\s*:\s*["\']([^"]*)["\']', webpage,
|
'xqsingle', category, playlist_id, 'Download episodes JSON metadata')
|
||||||
'primary provider site')
|
|
||||||
api_result = self._download_json(
|
|
||||||
'http://v.baidu.com/%s_intro/?dtype=%sPlayUrl&id=%s&site=%s' % (
|
|
||||||
category, category2, playlist_id, site),
|
|
||||||
playlist_id, 'Get playlist links')
|
|
||||||
|
|
||||||
entries = []
|
entries = [self.url_result(
|
||||||
for episode in api_result[0]['episodes']:
|
episode['url'], video_title=episode['title']
|
||||||
episode_id = '%s_%s' % (playlist_id, episode['episode'])
|
) for episode in episodes_detail['videos']]
|
||||||
|
|
||||||
redirect_page = self._download_webpage(
|
|
||||||
compat_urlparse.urljoin(url, episode['url']), episode_id,
|
|
||||||
note='Download Baidu redirect page')
|
|
||||||
real_url = self._html_search_regex(
|
|
||||||
r'location\.replace\("([^"]+)"\)', redirect_page, 'real URL')
|
|
||||||
|
|
||||||
entries.append(self.url_result(
|
|
||||||
real_url, video_title=episode['single_title']))
|
|
||||||
|
|
||||||
return self.playlist_result(
|
return self.playlist_result(
|
||||||
entries, playlist_id, playlist_title, playlist_description)
|
entries, playlist_id, playlist_title, playlist_description)
|
||||||
|
|||||||
@@ -4,15 +4,13 @@ import re
|
|||||||
import itertools
|
import itertools
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import compat_str
|
||||||
compat_urllib_parse,
|
|
||||||
compat_urllib_request,
|
|
||||||
compat_str,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
|
||||||
float_or_none,
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
|
sanitized_Request,
|
||||||
|
urlencode_postdata,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -57,8 +55,8 @@ class BambuserIE(InfoExtractor):
|
|||||||
'pass': password,
|
'pass': password,
|
||||||
}
|
}
|
||||||
|
|
||||||
request = compat_urllib_request.Request(
|
request = sanitized_Request(
|
||||||
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
|
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||||
request.add_header('Referer', self._LOGIN_URL)
|
request.add_header('Referer', self._LOGIN_URL)
|
||||||
response = self._download_webpage(
|
response = self._download_webpage(
|
||||||
request, None, 'Logging in as %s' % username)
|
request, None, 'Logging in as %s' % username)
|
||||||
@@ -126,7 +124,7 @@ class BambuserChannelIE(InfoExtractor):
|
|||||||
'&sort=created&access_mode=0%2C1%2C2&limit={count}'
|
'&sort=created&access_mode=0%2C1%2C2&limit={count}'
|
||||||
'&method=broadcast&format=json&vid_older_than={last}'
|
'&method=broadcast&format=json&vid_older_than={last}'
|
||||||
).format(user=user, count=self._STEP, last=last_id)
|
).format(user=user, count=self._STEP, last=last_id)
|
||||||
req = compat_urllib_request.Request(req_url)
|
req = sanitized_Request(req_url)
|
||||||
# Without setting this header, we wouldn't get any result
|
# Without setting this header, we wouldn't get any result
|
||||||
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
|
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
|
||||||
data = self._download_json(
|
data = self._download_json(
|
||||||
|
|||||||
@@ -10,6 +10,8 @@ from ..compat import (
|
|||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -27,7 +29,7 @@ class BandcampIE(InfoExtractor):
|
|||||||
'_skip': 'There is a limit of 200 free downloads / month for the test song'
|
'_skip': 'There is a limit of 200 free downloads / month for the test song'
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
|
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
|
||||||
'md5': '2b68e5851514c20efdff2afc5603b8b4',
|
'md5': '73d0b3171568232574e45652f8720b5c',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2650410135',
|
'id': '2650410135',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
@@ -46,26 +48,30 @@ class BandcampIE(InfoExtractor):
|
|||||||
if m_trackinfo:
|
if m_trackinfo:
|
||||||
json_code = m_trackinfo.group(1)
|
json_code = m_trackinfo.group(1)
|
||||||
data = json.loads(json_code)[0]
|
data = json.loads(json_code)[0]
|
||||||
|
track_id = compat_str(data['id'])
|
||||||
|
|
||||||
|
if not data.get('file'):
|
||||||
|
raise ExtractorError('Not streamable', video_id=track_id, expected=True)
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for format_id, format_url in data['file'].items():
|
for format_id, format_url in data['file'].items():
|
||||||
ext, abr_str = format_id.split('-', 1)
|
ext, abr_str = format_id.split('-', 1)
|
||||||
formats.append({
|
formats.append({
|
||||||
'format_id': format_id,
|
'format_id': format_id,
|
||||||
'url': format_url,
|
'url': self._proto_relative_url(format_url, 'http:'),
|
||||||
'ext': ext,
|
'ext': ext,
|
||||||
'vcodec': 'none',
|
'vcodec': 'none',
|
||||||
'acodec': ext,
|
'acodec': ext,
|
||||||
'abr': int(abr_str),
|
'abr': int_or_none(abr_str),
|
||||||
})
|
})
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': compat_str(data['id']),
|
'id': track_id,
|
||||||
'title': data['title'],
|
'title': data['title'],
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'duration': float(data['duration']),
|
'duration': float_or_none(data.get('duration')),
|
||||||
}
|
}
|
||||||
else:
|
else:
|
||||||
raise ExtractorError('No free songs found')
|
raise ExtractorError('No free songs found')
|
||||||
@@ -93,8 +99,8 @@ class BandcampIE(InfoExtractor):
|
|||||||
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
|
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
|
||||||
# If we could correctly generate the .rand field the url would be
|
# If we could correctly generate the .rand field the url would be
|
||||||
# in the "download_url" key
|
# in the "download_url" key
|
||||||
final_url = self._search_regex(
|
final_url = self._proto_relative_url(self._search_regex(
|
||||||
r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL')
|
r'"retry_url":"(.+?)"', final_url_webpage, 'final video URL'), 'http:')
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
|||||||
977
youtube_dl/extractor/bbc.py
Normal file
977
youtube_dl/extractor/bbc.py
Normal file
@@ -0,0 +1,977 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
|
parse_duration,
|
||||||
|
parse_iso8601,
|
||||||
|
unescapeHTML,
|
||||||
|
)
|
||||||
|
from ..compat import (
|
||||||
|
compat_etree_fromstring,
|
||||||
|
compat_HTTPError,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class BBCCoUkIE(InfoExtractor):
|
||||||
|
IE_NAME = 'bbc.co.uk'
|
||||||
|
IE_DESC = 'BBC iPlayer'
|
||||||
|
_ID_REGEX = r'[pb][\da-z]{7}'
|
||||||
|
_VALID_URL = r'''(?x)
|
||||||
|
https?://
|
||||||
|
(?:www\.)?bbc\.co\.uk/
|
||||||
|
(?:
|
||||||
|
programmes/(?!articles/)|
|
||||||
|
iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
|
||||||
|
music/clips[/#]|
|
||||||
|
radio/player/
|
||||||
|
)
|
||||||
|
(?P<id>%s)
|
||||||
|
''' % _ID_REGEX
|
||||||
|
|
||||||
|
_MEDIASELECTOR_URLS = [
|
||||||
|
# Provides HQ HLS streams with even better quality that pc mediaset but fails
|
||||||
|
# with geolocation in some cases when it's even not geo restricted at all (e.g.
|
||||||
|
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
|
||||||
|
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
|
||||||
|
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
|
||||||
|
]
|
||||||
|
|
||||||
|
_MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
|
||||||
|
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
|
||||||
|
|
||||||
|
_NAMESPACES = (
|
||||||
|
_MEDIASELECTION_NS,
|
||||||
|
_EMP_PLAYLIST_NS,
|
||||||
|
)
|
||||||
|
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b039d07m',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
|
||||||
|
'description': 'The Canadian poet and songwriter reflects on his musical career.',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b00yng1d',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'The Man in Black: Series 3: The Printed Name',
|
||||||
|
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
|
||||||
|
'duration': 1800,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'skip': 'Episode is no longer available on BBC iPlayer Radio',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b00yng1d',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'The Voice UK: Series 3: Blind Auditions 5',
|
||||||
|
'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
|
||||||
|
'duration': 5100,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b03k3pb7',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
|
||||||
|
'description': '2. Invasion',
|
||||||
|
'duration': 3600,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b04v209v',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Pete Tong, The Essential New Tune Special',
|
||||||
|
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
|
||||||
|
'duration': 10800,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'skip': 'Episode is no longer available on BBC iPlayer Radio',
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/music/clips/p022h44b',
|
||||||
|
'note': 'Audio',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p022h44j',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances',
|
||||||
|
'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.",
|
||||||
|
'duration': 227,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
|
||||||
|
'note': 'Video',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p025c103',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
|
||||||
|
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
|
||||||
|
'duration': 226,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p02n76xf',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
|
||||||
|
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
|
||||||
|
'duration': 3540,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'skip': 'geolocation',
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b05zmgw1',
|
||||||
|
'ext': 'flv',
|
||||||
|
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
|
||||||
|
'title': 'Royal Academy Summer Exhibition',
|
||||||
|
'duration': 3540,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'skip': 'geolocation',
|
||||||
|
}, {
|
||||||
|
# iptv-all mediaset fails with geolocation however there is no geo restriction
|
||||||
|
# for this programme at all
|
||||||
|
'url': 'http://www.bbc.co.uk/programmes/b06rkn85',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b06rkms3',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1",
|
||||||
|
'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!",
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# compact player (https://github.com/rg3/youtube-dl/issues/8147)
|
||||||
|
'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p028bfkj',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
|
||||||
|
'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bbc.co.uk/radio/player/p03cchwf',
|
||||||
|
'only_matching': True,
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
class MediaSelectionError(Exception):
|
||||||
|
def __init__(self, id):
|
||||||
|
self.id = id
|
||||||
|
|
||||||
|
def _extract_asx_playlist(self, connection, programme_id):
|
||||||
|
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
|
||||||
|
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
|
||||||
|
|
||||||
|
def _extract_connection(self, connection, programme_id):
|
||||||
|
formats = []
|
||||||
|
kind = connection.get('kind')
|
||||||
|
protocol = connection.get('protocol')
|
||||||
|
supplier = connection.get('supplier')
|
||||||
|
if protocol == 'http':
|
||||||
|
href = connection.get('href')
|
||||||
|
transfer_format = connection.get('transferFormat')
|
||||||
|
# ASX playlist
|
||||||
|
if supplier == 'asx':
|
||||||
|
for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
|
||||||
|
formats.append({
|
||||||
|
'url': ref,
|
||||||
|
'format_id': 'ref%s_%s' % (i, supplier),
|
||||||
|
})
|
||||||
|
# Skip DASH until supported
|
||||||
|
elif transfer_format == 'dash':
|
||||||
|
pass
|
||||||
|
elif transfer_format == 'hls':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
href, programme_id, ext='mp4', entry_protocol='m3u8_native',
|
||||||
|
m3u8_id=supplier, fatal=False))
|
||||||
|
# Direct link
|
||||||
|
else:
|
||||||
|
formats.append({
|
||||||
|
'url': href,
|
||||||
|
'format_id': supplier or kind or protocol,
|
||||||
|
})
|
||||||
|
elif protocol == 'rtmp':
|
||||||
|
application = connection.get('application', 'ondemand')
|
||||||
|
auth_string = connection.get('authString')
|
||||||
|
identifier = connection.get('identifier')
|
||||||
|
server = connection.get('server')
|
||||||
|
formats.append({
|
||||||
|
'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
|
||||||
|
'play_path': identifier,
|
||||||
|
'app': '%s?%s' % (application, auth_string),
|
||||||
|
'page_url': 'http://www.bbc.co.uk',
|
||||||
|
'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
|
||||||
|
'rtmp_live': False,
|
||||||
|
'ext': 'flv',
|
||||||
|
'format_id': supplier,
|
||||||
|
})
|
||||||
|
return formats
|
||||||
|
|
||||||
|
def _extract_items(self, playlist):
|
||||||
|
return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
|
||||||
|
|
||||||
|
def _findall_ns(self, element, xpath):
|
||||||
|
elements = []
|
||||||
|
for ns in self._NAMESPACES:
|
||||||
|
elements.extend(element.findall(xpath % ns))
|
||||||
|
return elements
|
||||||
|
|
||||||
|
def _extract_medias(self, media_selection):
|
||||||
|
error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
|
||||||
|
if error is None:
|
||||||
|
media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
|
||||||
|
if error is not None:
|
||||||
|
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
|
||||||
|
return self._findall_ns(media_selection, './{%s}media')
|
||||||
|
|
||||||
|
def _extract_connections(self, media):
|
||||||
|
return self._findall_ns(media, './{%s}connection')
|
||||||
|
|
||||||
|
def _extract_video(self, media, programme_id):
|
||||||
|
formats = []
|
||||||
|
vbr = int_or_none(media.get('bitrate'))
|
||||||
|
vcodec = media.get('encoding')
|
||||||
|
service = media.get('service')
|
||||||
|
width = int_or_none(media.get('width'))
|
||||||
|
height = int_or_none(media.get('height'))
|
||||||
|
file_size = int_or_none(media.get('media_file_size'))
|
||||||
|
for connection in self._extract_connections(media):
|
||||||
|
conn_formats = self._extract_connection(connection, programme_id)
|
||||||
|
for format in conn_formats:
|
||||||
|
format.update({
|
||||||
|
'width': width,
|
||||||
|
'height': height,
|
||||||
|
'vbr': vbr,
|
||||||
|
'vcodec': vcodec,
|
||||||
|
'filesize': file_size,
|
||||||
|
})
|
||||||
|
if service:
|
||||||
|
format['format_id'] = '%s_%s' % (service, format['format_id'])
|
||||||
|
formats.extend(conn_formats)
|
||||||
|
return formats
|
||||||
|
|
||||||
|
def _extract_audio(self, media, programme_id):
|
||||||
|
formats = []
|
||||||
|
abr = int_or_none(media.get('bitrate'))
|
||||||
|
acodec = media.get('encoding')
|
||||||
|
service = media.get('service')
|
||||||
|
for connection in self._extract_connections(media):
|
||||||
|
conn_formats = self._extract_connection(connection, programme_id)
|
||||||
|
for format in conn_formats:
|
||||||
|
format.update({
|
||||||
|
'format_id': '%s_%s' % (service, format['format_id']),
|
||||||
|
'abr': abr,
|
||||||
|
'acodec': acodec,
|
||||||
|
'vcodec': 'none',
|
||||||
|
})
|
||||||
|
formats.extend(conn_formats)
|
||||||
|
return formats
|
||||||
|
|
||||||
|
def _get_subtitles(self, media, programme_id):
|
||||||
|
subtitles = {}
|
||||||
|
for connection in self._extract_connections(media):
|
||||||
|
captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
|
||||||
|
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
|
||||||
|
subtitles[lang] = [
|
||||||
|
{
|
||||||
|
'url': connection.get('href'),
|
||||||
|
'ext': 'ttml',
|
||||||
|
},
|
||||||
|
]
|
||||||
|
return subtitles
|
||||||
|
|
||||||
|
def _raise_extractor_error(self, media_selection_error):
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
|
||||||
|
expected=True)
|
||||||
|
|
||||||
|
def _download_media_selector(self, programme_id):
|
||||||
|
last_exception = None
|
||||||
|
for mediaselector_url in self._MEDIASELECTOR_URLS:
|
||||||
|
try:
|
||||||
|
return self._download_media_selector_url(
|
||||||
|
mediaselector_url % programme_id, programme_id)
|
||||||
|
except BBCCoUkIE.MediaSelectionError as e:
|
||||||
|
if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
|
||||||
|
last_exception = e
|
||||||
|
continue
|
||||||
|
self._raise_extractor_error(e)
|
||||||
|
self._raise_extractor_error(last_exception)
|
||||||
|
|
||||||
|
def _download_media_selector_url(self, url, programme_id=None):
|
||||||
|
try:
|
||||||
|
media_selection = self._download_xml(
|
||||||
|
url, programme_id, 'Downloading media selection XML')
|
||||||
|
except ExtractorError as ee:
|
||||||
|
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404):
|
||||||
|
media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8'))
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
return self._process_media_selector(media_selection, programme_id)
|
||||||
|
|
||||||
|
def _process_media_selector(self, media_selection, programme_id):
|
||||||
|
formats = []
|
||||||
|
subtitles = None
|
||||||
|
|
||||||
|
for media in self._extract_medias(media_selection):
|
||||||
|
kind = media.get('kind')
|
||||||
|
if kind == 'audio':
|
||||||
|
formats.extend(self._extract_audio(media, programme_id))
|
||||||
|
elif kind == 'video':
|
||||||
|
formats.extend(self._extract_video(media, programme_id))
|
||||||
|
elif kind == 'captions':
|
||||||
|
subtitles = self.extract_subtitles(media, programme_id)
|
||||||
|
return formats, subtitles
|
||||||
|
|
||||||
|
def _download_playlist(self, playlist_id):
|
||||||
|
try:
|
||||||
|
playlist = self._download_json(
|
||||||
|
'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
|
||||||
|
playlist_id, 'Downloading playlist JSON')
|
||||||
|
|
||||||
|
version = playlist.get('defaultAvailableVersion')
|
||||||
|
if version:
|
||||||
|
smp_config = version['smpConfig']
|
||||||
|
title = smp_config['title']
|
||||||
|
description = smp_config['summary']
|
||||||
|
for item in smp_config['items']:
|
||||||
|
kind = item['kind']
|
||||||
|
if kind != 'programme' and kind != 'radioProgramme':
|
||||||
|
continue
|
||||||
|
programme_id = item.get('vpid')
|
||||||
|
duration = int_or_none(item.get('duration'))
|
||||||
|
formats, subtitles = self._download_media_selector(programme_id)
|
||||||
|
return programme_id, title, description, duration, formats, subtitles
|
||||||
|
except ExtractorError as ee:
|
||||||
|
if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
|
||||||
|
raise
|
||||||
|
|
||||||
|
# fallback to legacy playlist
|
||||||
|
return self._process_legacy_playlist(playlist_id)
|
||||||
|
|
||||||
|
def _process_legacy_playlist_url(self, url, display_id):
|
||||||
|
playlist = self._download_legacy_playlist_url(url, display_id)
|
||||||
|
return self._extract_from_legacy_playlist(playlist, display_id)
|
||||||
|
|
||||||
|
def _process_legacy_playlist(self, playlist_id):
|
||||||
|
return self._process_legacy_playlist_url(
|
||||||
|
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
|
||||||
|
|
||||||
|
def _download_legacy_playlist_url(self, url, playlist_id=None):
|
||||||
|
return self._download_xml(
|
||||||
|
url, playlist_id, 'Downloading legacy playlist XML')
|
||||||
|
|
||||||
|
def _extract_from_legacy_playlist(self, playlist, playlist_id):
|
||||||
|
no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
|
||||||
|
if no_items is not None:
|
||||||
|
reason = no_items.get('reason')
|
||||||
|
if reason == 'preAvailability':
|
||||||
|
msg = 'Episode %s is not yet available' % playlist_id
|
||||||
|
elif reason == 'postAvailability':
|
||||||
|
msg = 'Episode %s is no longer available' % playlist_id
|
||||||
|
elif reason == 'noMedia':
|
||||||
|
msg = 'Episode %s is not currently available' % playlist_id
|
||||||
|
else:
|
||||||
|
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
|
||||||
|
raise ExtractorError(msg, expected=True)
|
||||||
|
|
||||||
|
for item in self._extract_items(playlist):
|
||||||
|
kind = item.get('kind')
|
||||||
|
if kind != 'programme' and kind != 'radioProgramme':
|
||||||
|
continue
|
||||||
|
title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
|
||||||
|
description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
|
||||||
|
description = description_el.text if description_el is not None else None
|
||||||
|
|
||||||
|
def get_programme_id(item):
|
||||||
|
def get_from_attributes(item):
|
||||||
|
for p in('identifier', 'group'):
|
||||||
|
value = item.get(p)
|
||||||
|
if value and re.match(r'^[pb][\da-z]{7}$', value):
|
||||||
|
return value
|
||||||
|
get_from_attributes(item)
|
||||||
|
mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
|
||||||
|
if mediator is not None:
|
||||||
|
return get_from_attributes(mediator)
|
||||||
|
|
||||||
|
programme_id = get_programme_id(item)
|
||||||
|
duration = int_or_none(item.get('duration'))
|
||||||
|
|
||||||
|
if programme_id:
|
||||||
|
formats, subtitles = self._download_media_selector(programme_id)
|
||||||
|
else:
|
||||||
|
formats, subtitles = self._process_media_selector(item, playlist_id)
|
||||||
|
programme_id = playlist_id
|
||||||
|
|
||||||
|
return programme_id, title, description, duration, formats, subtitles
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
group_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, group_id, 'Downloading video page')
|
||||||
|
|
||||||
|
programme_id = None
|
||||||
|
duration = None
|
||||||
|
|
||||||
|
tviplayer = self._search_regex(
|
||||||
|
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
|
||||||
|
webpage, 'player', default=None)
|
||||||
|
|
||||||
|
if tviplayer:
|
||||||
|
player = self._parse_json(tviplayer, group_id).get('player', {})
|
||||||
|
duration = int_or_none(player.get('duration'))
|
||||||
|
programme_id = player.get('vpid')
|
||||||
|
|
||||||
|
if not programme_id:
|
||||||
|
programme_id = self._search_regex(
|
||||||
|
r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
|
||||||
|
|
||||||
|
if programme_id:
|
||||||
|
formats, subtitles = self._download_media_selector(programme_id)
|
||||||
|
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
|
||||||
|
(r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>',
|
||||||
|
r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title')
|
||||||
|
description = self._search_regex(
|
||||||
|
(r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
|
||||||
|
r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'),
|
||||||
|
webpage, 'description', default=None)
|
||||||
|
if not description:
|
||||||
|
description = self._html_search_meta('description', webpage)
|
||||||
|
else:
|
||||||
|
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': programme_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'thumbnail': self._og_search_thumbnail(webpage, default=None),
|
||||||
|
'duration': duration,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class BBCIE(BBCCoUkIE):
|
||||||
|
IE_NAME = 'bbc'
|
||||||
|
IE_DESC = 'BBC'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
|
||||||
|
|
||||||
|
_MEDIASELECTOR_URLS = [
|
||||||
|
# Provides HQ HLS streams but fails with geolocation in some cases when it's
|
||||||
|
# even not geo restricted at all
|
||||||
|
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
|
||||||
|
# Provides more formats, namely direct mp4 links, but fails on some videos with
|
||||||
|
# notukerror for non UK (?) users (e.g.
|
||||||
|
# http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
|
||||||
|
'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
|
||||||
|
# Provides fewer formats, but works everywhere for everybody (hopefully)
|
||||||
|
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
|
||||||
|
]
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
# article with multiple videos embedded with data-playable containing vpids
|
||||||
|
'url': 'http://www.bbc.com/news/world-europe-32668511',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'world-europe-32668511',
|
||||||
|
'title': 'Russia stages massive WW2 parade despite Western boycott',
|
||||||
|
'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
|
||||||
|
},
|
||||||
|
'playlist_count': 2,
|
||||||
|
}, {
|
||||||
|
# article with multiple videos embedded with data-playable (more videos)
|
||||||
|
'url': 'http://www.bbc.com/news/business-28299555',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'business-28299555',
|
||||||
|
'title': 'Farnborough Airshow: Video highlights',
|
||||||
|
'description': 'BBC reports and video highlights at the Farnborough Airshow.',
|
||||||
|
},
|
||||||
|
'playlist_count': 9,
|
||||||
|
'skip': 'Save time',
|
||||||
|
}, {
|
||||||
|
# article with multiple videos embedded with `new SMP()`
|
||||||
|
# broken
|
||||||
|
'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3662a707-0af9-3149-963f-47bea720b460',
|
||||||
|
'title': 'BUGGER',
|
||||||
|
},
|
||||||
|
'playlist_count': 18,
|
||||||
|
}, {
|
||||||
|
# single video embedded with data-playable containing vpid
|
||||||
|
'url': 'http://www.bbc.com/news/world-europe-32041533',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p02mprgb',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
|
||||||
|
'description': 'md5:2868290467291b37feda7863f7a83f54',
|
||||||
|
'duration': 47,
|
||||||
|
'timestamp': 1427219242,
|
||||||
|
'upload_date': '20150324',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# article with single video embedded with data-playable containing XML playlist
|
||||||
|
# with direct video links as progressiveDownloadUrl (for now these are extracted)
|
||||||
|
# and playlist with f4m and m3u8 as streamingUrl
|
||||||
|
'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '150615_telabyad_kentin_cogu',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
|
||||||
|
'timestamp': 1434397334,
|
||||||
|
'upload_date': '20150615',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# single video embedded with data-playable containing XML playlists (regional section)
|
||||||
|
'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
|
||||||
|
'timestamp': 1434713142,
|
||||||
|
'upload_date': '20150619',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# single video from video playlist embedded with vxp-playlist-data JSON
|
||||||
|
'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p02w6qjc',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
|
||||||
|
'duration': 56,
|
||||||
|
'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# single video story with digitalData
|
||||||
|
'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p02q6gc4',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'Sri Lanka’s spicy secret',
|
||||||
|
'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
|
||||||
|
'timestamp': 1437674293,
|
||||||
|
'upload_date': '20150723',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# single video story without digitalData
|
||||||
|
'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p018zqqg',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Hyundai Santa Fe Sport: Rock star',
|
||||||
|
'description': 'md5:b042a26142c4154a6e472933cf20793d',
|
||||||
|
'timestamp': 1415867444,
|
||||||
|
'upload_date': '20141113',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# single video with playlist.sxml URL in playlist param
|
||||||
|
'url': 'http://www.bbc.com/sport/0/football/33653409',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'p02xycnp',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
|
||||||
|
'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
|
||||||
|
'duration': 140,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# article with multiple videos embedded with playlist.sxml in playlist param
|
||||||
|
'url': 'http://www.bbc.com/sport/0/football/34475836',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '34475836',
|
||||||
|
'title': 'Jurgen Klopp: Furious football from a witty and winning coach',
|
||||||
|
'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.',
|
||||||
|
},
|
||||||
|
'playlist_count': 3,
|
||||||
|
}, {
|
||||||
|
# school report article with single video
|
||||||
|
'url': 'http://www.bbc.co.uk/schoolreport/35744779',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '35744779',
|
||||||
|
'title': 'School which breaks down barriers in Jerusalem',
|
||||||
|
},
|
||||||
|
'playlist_count': 1,
|
||||||
|
}, {
|
||||||
|
# single video with playlist URL from weather section
|
||||||
|
'url': 'http://www.bbc.com/weather/features/33601775',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# custom redirection to www.bbc.com
|
||||||
|
'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# single video article embedded with data-media-vpid
|
||||||
|
'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def suitable(cls, url):
|
||||||
|
return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url)
|
||||||
|
|
||||||
|
def _extract_from_media_meta(self, media_meta, video_id):
|
||||||
|
# Direct links to media in media metadata (e.g.
|
||||||
|
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
|
||||||
|
# TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
|
||||||
|
source_files = media_meta.get('sourceFiles')
|
||||||
|
if source_files:
|
||||||
|
return [{
|
||||||
|
'url': f['url'],
|
||||||
|
'format_id': format_id,
|
||||||
|
'ext': f.get('encoding'),
|
||||||
|
'tbr': float_or_none(f.get('bitrate'), 1000),
|
||||||
|
'filesize': int_or_none(f.get('filesize')),
|
||||||
|
} for format_id, f in source_files.items() if f.get('url')], []
|
||||||
|
|
||||||
|
programme_id = media_meta.get('externalId')
|
||||||
|
if programme_id:
|
||||||
|
return self._download_media_selector(programme_id)
|
||||||
|
|
||||||
|
# Process playlist.sxml as legacy playlist
|
||||||
|
href = media_meta.get('href')
|
||||||
|
if href:
|
||||||
|
playlist = self._download_legacy_playlist_url(href)
|
||||||
|
_, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
|
||||||
|
return formats, subtitles
|
||||||
|
|
||||||
|
return [], []
|
||||||
|
|
||||||
|
def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
|
||||||
|
programme_id, title, description, duration, formats, subtitles = \
|
||||||
|
self._process_legacy_playlist_url(url, playlist_id)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
return {
|
||||||
|
'id': programme_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'duration': duration,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
playlist_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, playlist_id)
|
||||||
|
|
||||||
|
json_ld_info = self._search_json_ld(webpage, playlist_id, default=None)
|
||||||
|
timestamp = json_ld_info.get('timestamp')
|
||||||
|
|
||||||
|
playlist_title = json_ld_info.get('title')
|
||||||
|
if not playlist_title:
|
||||||
|
playlist_title = self._og_search_title(
|
||||||
|
webpage, default=None) or self._html_search_regex(
|
||||||
|
r'<title>(.+?)</title>', webpage, 'playlist title', default=None)
|
||||||
|
if playlist_title:
|
||||||
|
playlist_title = re.sub(r'(.+)\s*-\s*BBC.*?$', r'\1', playlist_title).strip()
|
||||||
|
|
||||||
|
playlist_description = json_ld_info.get(
|
||||||
|
'description') or self._og_search_description(webpage, default=None)
|
||||||
|
|
||||||
|
if not timestamp:
|
||||||
|
timestamp = parse_iso8601(self._search_regex(
|
||||||
|
[r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
|
||||||
|
r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
|
||||||
|
r'"datePublished":\s*"([^"]+)'],
|
||||||
|
webpage, 'date', default=None))
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
|
||||||
|
# article with multiple videos embedded with playlist.sxml (e.g.
|
||||||
|
# http://www.bbc.com/sport/0/football/34475836)
|
||||||
|
playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
|
||||||
|
playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
|
||||||
|
if playlists:
|
||||||
|
entries = [
|
||||||
|
self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
|
||||||
|
for playlist_url in playlists]
|
||||||
|
|
||||||
|
# news article with multiple videos embedded with data-playable
|
||||||
|
data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
|
||||||
|
if data_playables:
|
||||||
|
for _, data_playable_json in data_playables:
|
||||||
|
data_playable = self._parse_json(
|
||||||
|
unescapeHTML(data_playable_json), playlist_id, fatal=False)
|
||||||
|
if not data_playable:
|
||||||
|
continue
|
||||||
|
settings = data_playable.get('settings', {})
|
||||||
|
if settings:
|
||||||
|
# data-playable with video vpid in settings.playlistObject.items (e.g.
|
||||||
|
# http://www.bbc.com/news/world-us-canada-34473351)
|
||||||
|
playlist_object = settings.get('playlistObject', {})
|
||||||
|
if playlist_object:
|
||||||
|
items = playlist_object.get('items')
|
||||||
|
if items and isinstance(items, list):
|
||||||
|
title = playlist_object['title']
|
||||||
|
description = playlist_object.get('summary')
|
||||||
|
duration = int_or_none(items[0].get('duration'))
|
||||||
|
programme_id = items[0].get('vpid')
|
||||||
|
formats, subtitles = self._download_media_selector(programme_id)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
entries.append({
|
||||||
|
'id': programme_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'duration': duration,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
# data-playable without vpid but with a playlist.sxml URLs
|
||||||
|
# in otherSettings.playlist (e.g.
|
||||||
|
# http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
|
||||||
|
playlist = data_playable.get('otherSettings', {}).get('playlist', {})
|
||||||
|
if playlist:
|
||||||
|
entries.append(self._extract_from_playlist_sxml(
|
||||||
|
playlist.get('progressiveDownloadUrl'), playlist_id, timestamp))
|
||||||
|
|
||||||
|
if entries:
|
||||||
|
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
|
||||||
|
|
||||||
|
# single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
|
||||||
|
programme_id = self._search_regex(
|
||||||
|
[r'data-(?:video-player|media)-vpid="(%s)"' % self._ID_REGEX,
|
||||||
|
r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
|
||||||
|
r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
|
||||||
|
webpage, 'vpid', default=None)
|
||||||
|
|
||||||
|
if programme_id:
|
||||||
|
formats, subtitles = self._download_media_selector(programme_id)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
# digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
|
||||||
|
digital_data = self._parse_json(
|
||||||
|
self._search_regex(
|
||||||
|
r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
|
||||||
|
programme_id, fatal=False)
|
||||||
|
page_info = digital_data.get('page', {}).get('pageInfo', {})
|
||||||
|
title = page_info.get('pageName') or self._og_search_title(webpage)
|
||||||
|
description = page_info.get('description') or self._og_search_description(webpage)
|
||||||
|
timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
|
||||||
|
return {
|
||||||
|
'id': programme_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
|
|
||||||
|
def extract_all(pattern):
|
||||||
|
return list(filter(None, map(
|
||||||
|
lambda s: self._parse_json(s, playlist_id, fatal=False),
|
||||||
|
re.findall(pattern, webpage))))
|
||||||
|
|
||||||
|
# Multiple video article (e.g.
|
||||||
|
# http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
|
||||||
|
EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
|
||||||
|
entries = []
|
||||||
|
for match in extract_all(r'new\s+SMP\(({.+?})\)'):
|
||||||
|
embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
|
||||||
|
if embed_url and re.match(EMBED_URL, embed_url):
|
||||||
|
entries.append(embed_url)
|
||||||
|
entries.extend(re.findall(
|
||||||
|
r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
|
||||||
|
if entries:
|
||||||
|
return self.playlist_result(
|
||||||
|
[self.url_result(entry, 'BBCCoUk') for entry in entries],
|
||||||
|
playlist_id, playlist_title, playlist_description)
|
||||||
|
|
||||||
|
# Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
|
||||||
|
medias = extract_all(r"data-media-meta='({[^']+})'")
|
||||||
|
|
||||||
|
if not medias:
|
||||||
|
# Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
|
||||||
|
media_asset = self._search_regex(
|
||||||
|
r'mediaAssetPage\.init\(\s*({.+?}), "/',
|
||||||
|
webpage, 'media asset', default=None)
|
||||||
|
if media_asset:
|
||||||
|
media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
|
||||||
|
medias = []
|
||||||
|
for video in media_asset_page.get('videos', {}).values():
|
||||||
|
medias.extend(video.values())
|
||||||
|
|
||||||
|
if not medias:
|
||||||
|
# Multiple video playlist with single `now playing` entry (e.g.
|
||||||
|
# http://www.bbc.com/news/video_and_audio/must_see/33767813)
|
||||||
|
vxp_playlist = self._parse_json(
|
||||||
|
self._search_regex(
|
||||||
|
r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
|
||||||
|
webpage, 'playlist data'),
|
||||||
|
playlist_id)
|
||||||
|
playlist_medias = []
|
||||||
|
for item in vxp_playlist:
|
||||||
|
media = item.get('media')
|
||||||
|
if not media:
|
||||||
|
continue
|
||||||
|
playlist_medias.append(media)
|
||||||
|
# Download single video if found media with asset id matching the video id from URL
|
||||||
|
if item.get('advert', {}).get('assetId') == playlist_id:
|
||||||
|
medias = [media]
|
||||||
|
break
|
||||||
|
# Fallback to the whole playlist
|
||||||
|
if not medias:
|
||||||
|
medias = playlist_medias
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
for num, media_meta in enumerate(medias, start=1):
|
||||||
|
formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
|
||||||
|
if not formats:
|
||||||
|
continue
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
video_id = media_meta.get('externalId')
|
||||||
|
if not video_id:
|
||||||
|
video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
|
||||||
|
|
||||||
|
title = media_meta.get('caption')
|
||||||
|
if not title:
|
||||||
|
title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
|
||||||
|
|
||||||
|
duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
|
||||||
|
|
||||||
|
images = []
|
||||||
|
for image in media_meta.get('images', {}).values():
|
||||||
|
images.extend(image.values())
|
||||||
|
if 'image' in media_meta:
|
||||||
|
images.append(media_meta['image'])
|
||||||
|
|
||||||
|
thumbnails = [{
|
||||||
|
'url': image.get('href'),
|
||||||
|
'width': int_or_none(image.get('width')),
|
||||||
|
'height': int_or_none(image.get('height')),
|
||||||
|
} for image in images]
|
||||||
|
|
||||||
|
entries.append({
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
'duration': duration,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
})
|
||||||
|
|
||||||
|
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
|
||||||
|
|
||||||
|
|
||||||
|
class BBCCoUkArticleIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
|
||||||
|
IE_NAME = 'bbc.co.uk:article'
|
||||||
|
IE_DESC = 'BBC articles'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3jNQLTMrPlYGTBn0WV6M2MS',
|
||||||
|
'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
|
||||||
|
'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
|
||||||
|
},
|
||||||
|
'playlist_count': 4,
|
||||||
|
'add_ie': ['BBCCoUk'],
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
playlist_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, playlist_id)
|
||||||
|
|
||||||
|
title = self._og_search_title(webpage)
|
||||||
|
description = self._og_search_description(webpage).strip()
|
||||||
|
|
||||||
|
entries = [self.url_result(programme_url) for programme_url in re.findall(
|
||||||
|
r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
|
||||||
|
|
||||||
|
return self.playlist_result(entries, playlist_id, title, description)
|
||||||
@@ -1,379 +0,0 @@
|
|||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import xml.etree.ElementTree
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
ExtractorError,
|
|
||||||
int_or_none,
|
|
||||||
)
|
|
||||||
from ..compat import compat_HTTPError
|
|
||||||
|
|
||||||
|
|
||||||
class BBCCoUkIE(InfoExtractor):
|
|
||||||
IE_NAME = 'bbc.co.uk'
|
|
||||||
IE_DESC = 'BBC iPlayer'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
|
|
||||||
|
|
||||||
_TESTS = [
|
|
||||||
{
|
|
||||||
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'b039d07m',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Kaleidoscope, Leonard Cohen',
|
|
||||||
'description': 'The Canadian poet and songwriter reflects on his musical career.',
|
|
||||||
'duration': 1740,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'b00yng1d',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'The Man in Black: Series 3: The Printed Name',
|
|
||||||
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
|
|
||||||
'duration': 1800,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Episode is no longer available on BBC iPlayer Radio',
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'b00yng1d',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'The Voice UK: Series 3: Blind Auditions 5',
|
|
||||||
'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
|
|
||||||
'duration': 5100,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'b03k3pb7',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
|
|
||||||
'description': '2. Invasion',
|
|
||||||
'duration': 3600,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'b04v209v',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Pete Tong, The Essential New Tune Special',
|
|
||||||
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
|
|
||||||
'duration': 10800,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
|
|
||||||
'note': 'Audio',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'p02frcch',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
|
|
||||||
'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
|
|
||||||
'duration': 3507,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
|
|
||||||
'note': 'Video',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'p025c103',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
|
|
||||||
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
|
|
||||||
'duration': 226,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'p02n76xf',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
|
|
||||||
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
|
|
||||||
'duration': 3540,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'geolocation',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'b05zmgw1',
|
|
||||||
'ext': 'flv',
|
|
||||||
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
|
|
||||||
'title': 'Royal Academy Summer Exhibition',
|
|
||||||
'duration': 3540,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'geolocation',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
|
|
||||||
'only_matching': True,
|
|
||||||
}
|
|
||||||
]
|
|
||||||
|
|
||||||
def _extract_asx_playlist(self, connection, programme_id):
|
|
||||||
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
|
|
||||||
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
|
|
||||||
|
|
||||||
def _extract_connection(self, connection, programme_id):
|
|
||||||
formats = []
|
|
||||||
protocol = connection.get('protocol')
|
|
||||||
supplier = connection.get('supplier')
|
|
||||||
if protocol == 'http':
|
|
||||||
href = connection.get('href')
|
|
||||||
# ASX playlist
|
|
||||||
if supplier == 'asx':
|
|
||||||
for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
|
|
||||||
formats.append({
|
|
||||||
'url': ref,
|
|
||||||
'format_id': 'ref%s_%s' % (i, supplier),
|
|
||||||
})
|
|
||||||
# Direct link
|
|
||||||
else:
|
|
||||||
formats.append({
|
|
||||||
'url': href,
|
|
||||||
'format_id': supplier,
|
|
||||||
})
|
|
||||||
elif protocol == 'rtmp':
|
|
||||||
application = connection.get('application', 'ondemand')
|
|
||||||
auth_string = connection.get('authString')
|
|
||||||
identifier = connection.get('identifier')
|
|
||||||
server = connection.get('server')
|
|
||||||
formats.append({
|
|
||||||
'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
|
|
||||||
'play_path': identifier,
|
|
||||||
'app': '%s?%s' % (application, auth_string),
|
|
||||||
'page_url': 'http://www.bbc.co.uk',
|
|
||||||
'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
|
|
||||||
'rtmp_live': False,
|
|
||||||
'ext': 'flv',
|
|
||||||
'format_id': supplier,
|
|
||||||
})
|
|
||||||
return formats
|
|
||||||
|
|
||||||
def _extract_items(self, playlist):
|
|
||||||
return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
|
|
||||||
|
|
||||||
def _extract_medias(self, media_selection):
|
|
||||||
error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
|
|
||||||
if error is not None:
|
|
||||||
raise ExtractorError(
|
|
||||||
'%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
|
|
||||||
return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
|
|
||||||
|
|
||||||
def _extract_connections(self, media):
|
|
||||||
return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
|
|
||||||
|
|
||||||
def _extract_video(self, media, programme_id):
|
|
||||||
formats = []
|
|
||||||
vbr = int(media.get('bitrate'))
|
|
||||||
vcodec = media.get('encoding')
|
|
||||||
service = media.get('service')
|
|
||||||
width = int(media.get('width'))
|
|
||||||
height = int(media.get('height'))
|
|
||||||
file_size = int(media.get('media_file_size'))
|
|
||||||
for connection in self._extract_connections(media):
|
|
||||||
conn_formats = self._extract_connection(connection, programme_id)
|
|
||||||
for format in conn_formats:
|
|
||||||
format.update({
|
|
||||||
'format_id': '%s_%s' % (service, format['format_id']),
|
|
||||||
'width': width,
|
|
||||||
'height': height,
|
|
||||||
'vbr': vbr,
|
|
||||||
'vcodec': vcodec,
|
|
||||||
'filesize': file_size,
|
|
||||||
})
|
|
||||||
formats.extend(conn_formats)
|
|
||||||
return formats
|
|
||||||
|
|
||||||
def _extract_audio(self, media, programme_id):
|
|
||||||
formats = []
|
|
||||||
abr = int(media.get('bitrate'))
|
|
||||||
acodec = media.get('encoding')
|
|
||||||
service = media.get('service')
|
|
||||||
for connection in self._extract_connections(media):
|
|
||||||
conn_formats = self._extract_connection(connection, programme_id)
|
|
||||||
for format in conn_formats:
|
|
||||||
format.update({
|
|
||||||
'format_id': '%s_%s' % (service, format['format_id']),
|
|
||||||
'abr': abr,
|
|
||||||
'acodec': acodec,
|
|
||||||
})
|
|
||||||
formats.extend(conn_formats)
|
|
||||||
return formats
|
|
||||||
|
|
||||||
def _get_subtitles(self, media, programme_id):
|
|
||||||
subtitles = {}
|
|
||||||
for connection in self._extract_connections(media):
|
|
||||||
captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
|
|
||||||
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
|
|
||||||
subtitles[lang] = [
|
|
||||||
{
|
|
||||||
'url': connection.get('href'),
|
|
||||||
'ext': 'ttml',
|
|
||||||
},
|
|
||||||
]
|
|
||||||
return subtitles
|
|
||||||
|
|
||||||
def _download_media_selector(self, programme_id):
|
|
||||||
try:
|
|
||||||
media_selection = self._download_xml(
|
|
||||||
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
|
|
||||||
programme_id, 'Downloading media selection XML')
|
|
||||||
except ExtractorError as ee:
|
|
||||||
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
|
|
||||||
media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
subtitles = None
|
|
||||||
|
|
||||||
for media in self._extract_medias(media_selection):
|
|
||||||
kind = media.get('kind')
|
|
||||||
if kind == 'audio':
|
|
||||||
formats.extend(self._extract_audio(media, programme_id))
|
|
||||||
elif kind == 'video':
|
|
||||||
formats.extend(self._extract_video(media, programme_id))
|
|
||||||
elif kind == 'captions':
|
|
||||||
subtitles = self.extract_subtitles(media, programme_id)
|
|
||||||
|
|
||||||
return formats, subtitles
|
|
||||||
|
|
||||||
def _download_playlist(self, playlist_id):
|
|
||||||
try:
|
|
||||||
playlist = self._download_json(
|
|
||||||
'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
|
|
||||||
playlist_id, 'Downloading playlist JSON')
|
|
||||||
|
|
||||||
version = playlist.get('defaultAvailableVersion')
|
|
||||||
if version:
|
|
||||||
smp_config = version['smpConfig']
|
|
||||||
title = smp_config['title']
|
|
||||||
description = smp_config['summary']
|
|
||||||
for item in smp_config['items']:
|
|
||||||
kind = item['kind']
|
|
||||||
if kind != 'programme' and kind != 'radioProgramme':
|
|
||||||
continue
|
|
||||||
programme_id = item.get('vpid')
|
|
||||||
duration = int(item.get('duration'))
|
|
||||||
formats, subtitles = self._download_media_selector(programme_id)
|
|
||||||
return programme_id, title, description, duration, formats, subtitles
|
|
||||||
except ExtractorError as ee:
|
|
||||||
if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
|
|
||||||
raise
|
|
||||||
|
|
||||||
# fallback to legacy playlist
|
|
||||||
playlist = self._download_xml(
|
|
||||||
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id,
|
|
||||||
playlist_id, 'Downloading legacy playlist XML')
|
|
||||||
|
|
||||||
no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
|
|
||||||
if no_items is not None:
|
|
||||||
reason = no_items.get('reason')
|
|
||||||
if reason == 'preAvailability':
|
|
||||||
msg = 'Episode %s is not yet available' % playlist_id
|
|
||||||
elif reason == 'postAvailability':
|
|
||||||
msg = 'Episode %s is no longer available' % playlist_id
|
|
||||||
elif reason == 'noMedia':
|
|
||||||
msg = 'Episode %s is not currently available' % playlist_id
|
|
||||||
else:
|
|
||||||
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
|
|
||||||
raise ExtractorError(msg, expected=True)
|
|
||||||
|
|
||||||
for item in self._extract_items(playlist):
|
|
||||||
kind = item.get('kind')
|
|
||||||
if kind != 'programme' and kind != 'radioProgramme':
|
|
||||||
continue
|
|
||||||
title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
|
|
||||||
description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
|
|
||||||
programme_id = item.get('identifier')
|
|
||||||
duration = int(item.get('duration'))
|
|
||||||
formats, subtitles = self._download_media_selector(programme_id)
|
|
||||||
|
|
||||||
return programme_id, title, description, duration, formats, subtitles
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
group_id = self._match_id(url)
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, group_id, 'Downloading video page')
|
|
||||||
|
|
||||||
programme_id = None
|
|
||||||
|
|
||||||
tviplayer = self._search_regex(
|
|
||||||
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
|
|
||||||
webpage, 'player', default=None)
|
|
||||||
|
|
||||||
if tviplayer:
|
|
||||||
player = self._parse_json(tviplayer, group_id).get('player', {})
|
|
||||||
duration = int_or_none(player.get('duration'))
|
|
||||||
programme_id = player.get('vpid')
|
|
||||||
|
|
||||||
if not programme_id:
|
|
||||||
programme_id = self._search_regex(
|
|
||||||
r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
|
|
||||||
|
|
||||||
if programme_id:
|
|
||||||
formats, subtitles = self._download_media_selector(programme_id)
|
|
||||||
title = self._og_search_title(webpage)
|
|
||||||
description = self._search_regex(
|
|
||||||
r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
|
|
||||||
webpage, 'description', fatal=False)
|
|
||||||
else:
|
|
||||||
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
|
|
||||||
|
|
||||||
self._sort_formats(formats)
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': programme_id,
|
|
||||||
'title': title,
|
|
||||||
'description': description,
|
|
||||||
'thumbnail': self._og_search_thumbnail(webpage, default=None),
|
|
||||||
'duration': duration,
|
|
||||||
'formats': formats,
|
|
||||||
'subtitles': subtitles,
|
|
||||||
}
|
|
||||||
@@ -1,65 +1,130 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import (
|
||||||
|
compat_chr,
|
||||||
|
compat_ord,
|
||||||
|
compat_urllib_parse_unquote,
|
||||||
|
)
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class BeegIE(InfoExtractor):
|
class BeegIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://beeg.com/5416503',
|
'url': 'http://beeg.com/5416503',
|
||||||
'md5': '1bff67111adb785c51d1b42959ec10e5',
|
'md5': '46c384def73b33dbc581262e5ee67cef',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5416503',
|
'id': '5416503',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Sultry Striptease',
|
'title': 'Sultry Striptease',
|
||||||
'description': 'md5:6db3c6177972822aaba18652ff59c773',
|
'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
|
||||||
'categories': list, # NSFW
|
'timestamp': 1391813355,
|
||||||
'thumbnail': 're:https?://.*\.jpg$',
|
'upload_date': '20140207',
|
||||||
|
'duration': 383,
|
||||||
|
'tags': list,
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
quality_arr = self._search_regex(
|
cpl_url = self._search_regex(
|
||||||
r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')
|
r'<script[^>]+src=(["\'])(?P<url>(?:https?:)?//static\.beeg\.com/cpl/\d+\.js.*?)\1',
|
||||||
|
webpage, 'cpl', default=None, group='url')
|
||||||
|
|
||||||
formats = [{
|
beeg_version, beeg_salt = [None] * 2
|
||||||
'url': fmt[1],
|
|
||||||
'format_id': fmt[0],
|
|
||||||
'height': int(fmt[0][:-1]),
|
|
||||||
} for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
|
|
||||||
|
|
||||||
|
if cpl_url:
|
||||||
|
cpl = self._download_webpage(
|
||||||
|
self._proto_relative_url(cpl_url), video_id,
|
||||||
|
'Downloading cpl JS', fatal=False)
|
||||||
|
if cpl:
|
||||||
|
beeg_version = self._search_regex(
|
||||||
|
r'beeg_version\s*=\s*(\d+)', cpl,
|
||||||
|
'beeg version', default=None) or self._search_regex(
|
||||||
|
r'/(\d+)\.js', cpl_url, 'beeg version', default=None)
|
||||||
|
beeg_salt = self._search_regex(
|
||||||
|
r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg beeg_salt',
|
||||||
|
default=None, group='beeg_salt')
|
||||||
|
|
||||||
|
beeg_version = beeg_version or '1750'
|
||||||
|
beeg_salt = beeg_salt or 'MIDtGaw96f0N1kMMAM1DE46EC9pmFr'
|
||||||
|
|
||||||
|
video = self._download_json(
|
||||||
|
'http://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
|
||||||
|
video_id)
|
||||||
|
|
||||||
|
def split(o, e):
|
||||||
|
def cut(s, x):
|
||||||
|
n.append(s[:x])
|
||||||
|
return s[x:]
|
||||||
|
n = []
|
||||||
|
r = len(o) % e
|
||||||
|
if r > 0:
|
||||||
|
o = cut(o, r)
|
||||||
|
while len(o) > e:
|
||||||
|
o = cut(o, e)
|
||||||
|
n.append(o)
|
||||||
|
return n
|
||||||
|
|
||||||
|
def decrypt_key(key):
|
||||||
|
# Reverse engineered from http://static.beeg.com/cpl/1738.js
|
||||||
|
a = beeg_salt
|
||||||
|
e = compat_urllib_parse_unquote(key)
|
||||||
|
o = ''.join([
|
||||||
|
compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
|
||||||
|
for n in range(len(e))])
|
||||||
|
return ''.join(split(o, 3)[::-1])
|
||||||
|
|
||||||
|
def decrypt_url(encrypted_url):
|
||||||
|
encrypted_url = self._proto_relative_url(
|
||||||
|
encrypted_url.replace('{DATA_MARKERS}', ''), 'https:')
|
||||||
|
key = self._search_regex(
|
||||||
|
r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None)
|
||||||
|
if not key:
|
||||||
|
return encrypted_url
|
||||||
|
return encrypted_url.replace(key, decrypt_key(key))
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for format_id, video_url in video.items():
|
||||||
|
if not video_url:
|
||||||
|
continue
|
||||||
|
height = self._search_regex(
|
||||||
|
r'^(\d+)[pP]$', format_id, 'height', default=None)
|
||||||
|
if not height:
|
||||||
|
continue
|
||||||
|
formats.append({
|
||||||
|
'url': decrypt_url(video_url),
|
||||||
|
'format_id': format_id,
|
||||||
|
'height': int(height),
|
||||||
|
})
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
title = self._html_search_regex(
|
title = video['title']
|
||||||
r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
|
video_id = video.get('id') or video_id
|
||||||
|
display_id = video.get('code')
|
||||||
|
description = video.get('desc')
|
||||||
|
|
||||||
description = self._html_search_regex(
|
timestamp = parse_iso8601(video.get('date'), ' ')
|
||||||
r'<meta name="description" content="([^"]*)"',
|
duration = int_or_none(video.get('duration'))
|
||||||
webpage, 'description', fatal=False)
|
|
||||||
thumbnail = self._html_search_regex(
|
|
||||||
r'\'previewer.url\'\s*:\s*"([^"]*)"',
|
|
||||||
webpage, 'thumbnail', fatal=False)
|
|
||||||
|
|
||||||
categories_str = self._html_search_regex(
|
tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None
|
||||||
r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
|
|
||||||
categories = (
|
|
||||||
None if categories_str is None
|
|
||||||
else categories_str.split(','))
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': description,
|
'description': description,
|
||||||
'thumbnail': thumbnail,
|
'timestamp': timestamp,
|
||||||
'categories': categories,
|
'duration': duration,
|
||||||
|
'tags': tags,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'age_limit': 18,
|
'age_limit': self._rta_search(webpage),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from ..utils import url_basename
|
|||||||
|
|
||||||
|
|
||||||
class BehindKinkIE(InfoExtractor):
|
class BehindKinkIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
|
_VALID_URL = r'https?://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
|
'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
|
||||||
'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
|
'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urllib_parse
|
from ..compat import compat_urllib_parse_unquote
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
xpath_text,
|
xpath_text,
|
||||||
xpath_with_ns,
|
xpath_with_ns,
|
||||||
@@ -57,7 +57,7 @@ class BetIE(InfoExtractor):
|
|||||||
display_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
media_url = compat_urllib_parse.unquote(self._search_regex(
|
media_url = compat_urllib_parse_unquote(self._search_regex(
|
||||||
[r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
|
[r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
|
||||||
webpage, 'media URL'))
|
webpage, 'media URL'))
|
||||||
|
|
||||||
@@ -94,6 +94,7 @@ class BetIE(InfoExtractor):
|
|||||||
xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')
|
xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')
|
||||||
|
|
||||||
formats = self._extract_smil_formats(smil_url, display_id)
|
formats = self._extract_smil_formats(smil_url, display_id)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
|||||||
85
youtube_dl/extractor/bigflix.py
Normal file
85
youtube_dl/extractor/bigflix.py
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_urllib_parse_unquote
|
||||||
|
|
||||||
|
|
||||||
|
class BigflixIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?bigflix\.com/.+/(?P<id>[0-9]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.bigflix.com/Hindi-movies/Action-movies/Singham-Returns/16537',
|
||||||
|
'md5': 'ec76aa9b1129e2e5b301a474e54fab74',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '16537',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Singham Returns',
|
||||||
|
'description': 'md5:3d2ba5815f14911d5cc6a501ae0cf65d',
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# 2 formats
|
||||||
|
'url': 'http://www.bigflix.com/Tamil-movies/Drama-movies/Madarasapatinam/16070',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '16070',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Madarasapatinam',
|
||||||
|
'description': 'md5:63b9b8ed79189c6f0418c26d9a3452ca',
|
||||||
|
'formats': 'mincount:2',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
# multiple formats
|
||||||
|
'url': 'http://www.bigflix.com/Malayalam-movies/Drama-movies/Indian-Rupee/15967',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
title = self._html_search_regex(
|
||||||
|
r'<div[^>]+class=["\']pagetitle["\'][^>]*>(.+?)</div>',
|
||||||
|
webpage, 'title')
|
||||||
|
|
||||||
|
def decode_url(quoted_b64_url):
|
||||||
|
return base64.b64decode(compat_urllib_parse_unquote(
|
||||||
|
quoted_b64_url).encode('ascii')).decode('utf-8')
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for height, encoded_url in re.findall(
|
||||||
|
r'ContentURL_(\d{3,4})[pP][^=]+=([^&]+)', webpage):
|
||||||
|
video_url = decode_url(encoded_url)
|
||||||
|
f = {
|
||||||
|
'url': video_url,
|
||||||
|
'format_id': '%sp' % height,
|
||||||
|
'height': int(height),
|
||||||
|
}
|
||||||
|
if video_url.startswith('rtmp'):
|
||||||
|
f['ext'] = 'flv'
|
||||||
|
formats.append(f)
|
||||||
|
|
||||||
|
file_url = self._search_regex(
|
||||||
|
r'file=([^&]+)', webpage, 'video url', default=None)
|
||||||
|
if file_url:
|
||||||
|
video_url = decode_url(file_url)
|
||||||
|
if all(f['url'] != video_url for f in formats):
|
||||||
|
formats.append({
|
||||||
|
'url': decode_url(file_url),
|
||||||
|
})
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
description = self._html_search_meta('description', webpage)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'formats': formats
|
||||||
|
}
|
||||||
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
fix_xml_ampersands,
|
unescapeHTML,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -17,26 +17,24 @@ class BildIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '38184146',
|
'id': '38184146',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'BILD hat sie getestet',
|
'title': 'Das können die neuen iPads',
|
||||||
|
'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f',
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
'duration': 196,
|
'duration': 196,
|
||||||
'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
|
video_data = self._download_json(
|
||||||
doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands)
|
url.split('.bild.html')[0] + ',view=json.bild.html', video_id)
|
||||||
|
|
||||||
duration = int_or_none(doc.attrib.get('duration'), scale=1000)
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': doc.attrib['ueberschrift'],
|
'title': unescapeHTML(video_data['title']).strip(),
|
||||||
'description': doc.attrib.get('text'),
|
'description': unescapeHTML(video_data.get('description')),
|
||||||
'url': doc.attrib['src'],
|
'url': video_data['clipList'][0]['srces'][0]['src'],
|
||||||
'thumbnail': doc.attrib.get('img'),
|
'thumbnail': video_data.get('poster'),
|
||||||
'duration': duration,
|
'duration': int_or_none(video_data.get('durationSec')),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,135 +1,153 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import calendar
|
||||||
|
import datetime
|
||||||
import re
|
import re
|
||||||
import itertools
|
|
||||||
import json
|
|
||||||
import xml.etree.ElementTree as ET
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import (
|
||||||
|
compat_etree_fromstring,
|
||||||
|
compat_str,
|
||||||
|
compat_parse_qs,
|
||||||
|
compat_xml_parse_error,
|
||||||
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
|
||||||
unified_strdate,
|
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
int_or_none,
|
||||||
|
float_or_none,
|
||||||
|
xpath_text,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class BiliBiliIE(InfoExtractor):
|
class BiliBiliIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
|
_VALID_URL = r'https?://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.bilibili.tv/video/av1074402/',
|
'url': 'http://www.bilibili.tv/video/av1074402/',
|
||||||
'md5': '2c301e4dab317596e837c3e7633e7d86',
|
'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1074402_part1',
|
'id': '1554319',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
'title': '【金坷垃】金泡沫',
|
'title': '【金坷垃】金泡沫',
|
||||||
'duration': 308,
|
'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
|
||||||
|
'duration': 308.067,
|
||||||
|
'timestamp': 1398012660,
|
||||||
'upload_date': '20140420',
|
'upload_date': '20140420',
|
||||||
'thumbnail': 're:^https?://.+\.jpg',
|
'thumbnail': 're:^https?://.+\.jpg',
|
||||||
|
'uploader': '菊子桑',
|
||||||
|
'uploader_id': '156160',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.bilibili.com/video/av1041170/',
|
'url': 'http://www.bilibili.com/video/av1041170/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1041170',
|
'id': '1041170',
|
||||||
'title': '【BD1080P】刀语【诸神&异域】',
|
'title': '【BD1080P】刀语【诸神&异域】',
|
||||||
|
'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
|
||||||
},
|
},
|
||||||
'playlist_count': 9,
|
'playlist_count': 9,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
# BiliBili blocks keys from time to time. The current key is extracted from
|
||||||
|
# the Android client
|
||||||
|
# TODO: find the sign algorithm used in the flash player
|
||||||
|
_APP_KEY = '86385cdc024c0f6c'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
if self._search_regex(r'(此视频不存在或被删除)', webpage, 'error message', default=None):
|
params = compat_parse_qs(self._search_regex(
|
||||||
raise ExtractorError('The video does not exist or was deleted', expected=True)
|
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
|
||||||
video_code = self._search_regex(
|
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
|
||||||
r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
|
webpage, 'player parameters'))
|
||||||
|
cid = params['cid'][0]
|
||||||
|
|
||||||
title = self._html_search_meta(
|
info_xml_str = self._download_webpage(
|
||||||
'media:title', video_code, 'title', fatal=True)
|
'http://interface.bilibili.com/v_cdn_play',
|
||||||
duration_str = self._html_search_meta(
|
cid, query={'appkey': self._APP_KEY, 'cid': cid},
|
||||||
'duration', video_code, 'duration')
|
note='Downloading video info page')
|
||||||
if duration_str is None:
|
|
||||||
duration = None
|
err_msg = None
|
||||||
|
durls = None
|
||||||
|
info_xml = None
|
||||||
|
try:
|
||||||
|
info_xml = compat_etree_fromstring(info_xml_str.encode('utf-8'))
|
||||||
|
except compat_xml_parse_error:
|
||||||
|
info_json = self._parse_json(info_xml_str, video_id, fatal=False)
|
||||||
|
err_msg = (info_json or {}).get('error_text')
|
||||||
else:
|
else:
|
||||||
duration_mobj = re.match(
|
err_msg = xpath_text(info_xml, './message')
|
||||||
r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
|
|
||||||
duration_str)
|
|
||||||
duration = (
|
|
||||||
int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
|
|
||||||
int(duration_mobj.group('minutes')) * 60 +
|
|
||||||
int(duration_mobj.group('seconds')))
|
|
||||||
upload_date = unified_strdate(self._html_search_meta(
|
|
||||||
'uploadDate', video_code, fatal=False))
|
|
||||||
thumbnail = self._html_search_meta(
|
|
||||||
'thumbnailUrl', video_code, 'thumbnail', fatal=False)
|
|
||||||
|
|
||||||
cid = self._search_regex(r'cid=(\d+)', webpage, 'cid')
|
if info_xml is not None:
|
||||||
|
durls = info_xml.findall('./durl')
|
||||||
|
if not durls:
|
||||||
|
if err_msg:
|
||||||
|
raise ExtractorError('%s said: %s' % (self.IE_NAME, err_msg), expected=True)
|
||||||
|
else:
|
||||||
|
raise ExtractorError('No videos found!')
|
||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
|
|
||||||
lq_page = self._download_webpage(
|
for durl in durls:
|
||||||
'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
|
size = xpath_text(durl, ['./filesize', './size'])
|
||||||
video_id,
|
|
||||||
note='Downloading LQ video info'
|
|
||||||
)
|
|
||||||
try:
|
|
||||||
err_info = json.loads(lq_page)
|
|
||||||
raise ExtractorError(
|
|
||||||
'BiliBili said: ' + err_info['error_text'], expected=True)
|
|
||||||
except ValueError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
lq_doc = ET.fromstring(lq_page)
|
|
||||||
lq_durls = lq_doc.findall('./durl')
|
|
||||||
|
|
||||||
hq_doc = self._download_xml(
|
|
||||||
'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
|
|
||||||
video_id,
|
|
||||||
note='Downloading HQ video info',
|
|
||||||
fatal=False,
|
|
||||||
)
|
|
||||||
if hq_doc is not False:
|
|
||||||
hq_durls = hq_doc.findall('./durl')
|
|
||||||
assert len(lq_durls) == len(hq_durls)
|
|
||||||
else:
|
|
||||||
hq_durls = itertools.repeat(None)
|
|
||||||
|
|
||||||
i = 1
|
|
||||||
for lq_durl, hq_durl in zip(lq_durls, hq_durls):
|
|
||||||
formats = [{
|
formats = [{
|
||||||
'format_id': 'lq',
|
'url': durl.find('./url').text,
|
||||||
'quality': 1,
|
'filesize': int_or_none(size),
|
||||||
'url': lq_durl.find('./url').text,
|
|
||||||
'filesize': int_or_none(
|
|
||||||
lq_durl.find('./size'), get_attr='text'),
|
|
||||||
}]
|
}]
|
||||||
if hq_durl is not None:
|
for backup_url in durl.findall('./backup_url/url'):
|
||||||
formats.append({
|
formats.append({
|
||||||
'format_id': 'hq',
|
'url': backup_url.text,
|
||||||
'quality': 2,
|
# backup URLs have lower priorities
|
||||||
'ext': 'flv',
|
'preference': -2 if 'hd.mp4' in backup_url.text else -3,
|
||||||
'url': hq_durl.find('./url').text,
|
|
||||||
'filesize': int_or_none(
|
|
||||||
hq_durl.find('./size'), get_attr='text'),
|
|
||||||
})
|
})
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
entries.append({
|
entries.append({
|
||||||
'id': '%s_part%d' % (video_id, i),
|
'id': '%s_part%s' % (cid, xpath_text(durl, './order')),
|
||||||
'title': title,
|
'duration': int_or_none(xpath_text(durl, './length'), 1000),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'duration': duration,
|
|
||||||
'upload_date': upload_date,
|
|
||||||
'thumbnail': thumbnail,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
i += 1
|
title = self._html_search_regex('<h1[^>]+title="([^"]+)">', webpage, 'title')
|
||||||
|
description = self._html_search_meta('description', webpage)
|
||||||
|
datetime_str = self._html_search_regex(
|
||||||
|
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', fatal=False)
|
||||||
|
if datetime_str:
|
||||||
|
timestamp = calendar.timegm(datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M').timetuple())
|
||||||
|
|
||||||
return {
|
# TODO 'view_count' requires deobfuscating Javascript
|
||||||
'_type': 'multi_video',
|
info = {
|
||||||
'entries': entries,
|
'id': compat_str(cid),
|
||||||
'id': video_id,
|
'title': title,
|
||||||
'title': title
|
'description': description,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'thumbnail': self._html_search_meta('thumbnailUrl', webpage),
|
||||||
|
'duration': float_or_none(xpath_text(info_xml, './timelength'), scale=1000),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uploader_mobj = re.search(
|
||||||
|
r'<a[^>]+href="https?://space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"',
|
||||||
|
webpage)
|
||||||
|
if uploader_mobj:
|
||||||
|
info.update({
|
||||||
|
'uploader': uploader_mobj.group('name'),
|
||||||
|
'uploader_id': uploader_mobj.group('id'),
|
||||||
|
})
|
||||||
|
|
||||||
|
for entry in entries:
|
||||||
|
entry.update(info)
|
||||||
|
|
||||||
|
if len(entries) == 1:
|
||||||
|
return entries[0]
|
||||||
|
else:
|
||||||
|
return {
|
||||||
|
'_type': 'multi_video',
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'entries': entries,
|
||||||
|
}
|
||||||
|
|||||||
86
youtube_dl/extractor/biobiochiletv.py
Normal file
86
youtube_dl/extractor/biobiochiletv.py
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import remove_end
|
||||||
|
|
||||||
|
|
||||||
|
class BioBioChileTVIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://tv\.biobiochile\.cl/notas/(?:[^/]+/)+(?P<id>[^/]+)\.shtml'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://tv.biobiochile.cl/notas/2015/10/21/sobre-camaras-y-camarillas-parlamentarias.shtml',
|
||||||
|
'md5': '26f51f03cf580265defefb4518faec09',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'sobre-camaras-y-camarillas-parlamentarias',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Sobre Cámaras y camarillas parlamentarias',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'uploader': 'Fernando Atria',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# different uploader layout
|
||||||
|
'url': 'http://tv.biobiochile.cl/notas/2016/03/18/natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades.shtml',
|
||||||
|
'md5': 'edc2e6b58974c46d5b047dea3c539ff3',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Natalia Valdebenito repasa a diputado Hasbún: Pasó a la categoría de hablar brutalidades',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'uploader': 'Piangella Obrador',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://tv.biobiochile.cl/notas/2015/10/22/ninos-transexuales-de-quien-es-la-decision.shtml',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://tv.biobiochile.cl/notas/2015/10/21/exclusivo-hector-pinto-formador-de-chupete-revela-version-del-ex-delantero-albo.shtml',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
title = remove_end(self._og_search_title(webpage), ' - BioBioChile TV')
|
||||||
|
|
||||||
|
file_url = self._search_regex(
|
||||||
|
r'loadFWPlayerVideo\([^,]+,\s*(["\'])(?P<url>.+?)\1',
|
||||||
|
webpage, 'file url', group='url')
|
||||||
|
|
||||||
|
base_url = self._search_regex(
|
||||||
|
r'file\s*:\s*(["\'])(?P<url>.+?)\1\s*\+\s*fileURL', webpage,
|
||||||
|
'base url', default='http://unlimited2-cl.digitalproserver.com/bbtv/',
|
||||||
|
group='url')
|
||||||
|
|
||||||
|
formats = self._extract_m3u8_formats(
|
||||||
|
'%s%s/playlist.m3u8' % (base_url, file_url), video_id, 'mp4',
|
||||||
|
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
|
||||||
|
f = {
|
||||||
|
'url': '%s%s' % (base_url, file_url),
|
||||||
|
'format_id': 'http',
|
||||||
|
'protocol': 'http',
|
||||||
|
'preference': 1,
|
||||||
|
}
|
||||||
|
if formats:
|
||||||
|
f_copy = formats[-1].copy()
|
||||||
|
f_copy.update(f)
|
||||||
|
f = f_copy
|
||||||
|
formats.append(f)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
thumbnail = self._og_search_thumbnail(webpage)
|
||||||
|
uploader = self._html_search_regex(
|
||||||
|
r'<a[^>]+href=["\']https?://busca\.biobiochile\.cl/author[^>]+>(.+?)</a>',
|
||||||
|
webpage, 'uploader', fatal=False)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'uploader': uploader,
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
39
youtube_dl/extractor/biqle.py
Normal file
39
youtube_dl/extractor/biqle.py
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
|
class BIQLEIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?biqle\.(?:com|org|ru)/watch/(?P<id>-?\d+_\d+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.biqle.ru/watch/847655_160197695',
|
||||||
|
'md5': 'ad5f746a874ccded7b8f211aeea96637',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '160197695',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Foo Fighters - The Pretender (Live at Wembley Stadium)',
|
||||||
|
'uploader': 'Andrey Rogozin',
|
||||||
|
'upload_date': '20110605',
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'https://biqle.org/watch/-44781847_168547604',
|
||||||
|
'md5': '7f24e72af1db0edf7c1aaba513174f97',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '168547604',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Ребенок в шоке от автоматической мойки',
|
||||||
|
'uploader': 'Dmitry Kotov',
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
embed_url = self._proto_relative_url(self._search_regex(
|
||||||
|
r'<iframe.+?src="((?:http:)?//daxab\.com/[^"]+)".*?></iframe>', webpage, 'embed url'))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': embed_url,
|
||||||
|
}
|
||||||
110
youtube_dl/extractor/bleacherreport.py
Normal file
110
youtube_dl/extractor/bleacherreport.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from .amp import AMPIE
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
int_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class BleacherReportIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
|
||||||
|
'md5': 'a3ffc3dc73afdbc2010f02d98f990f20',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2496438',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
|
||||||
|
'uploader_id': 3992341,
|
||||||
|
'description': 'CFB, ACC, Florida State',
|
||||||
|
'timestamp': 1434380212,
|
||||||
|
'upload_date': '20150615',
|
||||||
|
'uploader': 'Team Stream Now ',
|
||||||
|
},
|
||||||
|
'add_ie': ['Ooyala'],
|
||||||
|
}, {
|
||||||
|
'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo',
|
||||||
|
'md5': '6a5cd403418c7b01719248ca97fb0692',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2586817',
|
||||||
|
'ext': 'webm',
|
||||||
|
'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo',
|
||||||
|
'timestamp': 1446839961,
|
||||||
|
'uploader': 'Sean Fay',
|
||||||
|
'description': 'md5:825e94e0f3521df52fa83b2ed198fa20',
|
||||||
|
'uploader_id': 6466954,
|
||||||
|
'upload_date': '20151011',
|
||||||
|
},
|
||||||
|
'add_ie': ['Youtube'],
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
article_id = self._match_id(url)
|
||||||
|
|
||||||
|
article_data = self._download_json('http://api.bleacherreport.com/api/v1/articles/%s' % article_id, article_id)['article']
|
||||||
|
|
||||||
|
thumbnails = []
|
||||||
|
primary_photo = article_data.get('primaryPhoto')
|
||||||
|
if primary_photo:
|
||||||
|
thumbnails = [{
|
||||||
|
'url': primary_photo['url'],
|
||||||
|
'width': primary_photo.get('width'),
|
||||||
|
'height': primary_photo.get('height'),
|
||||||
|
}]
|
||||||
|
|
||||||
|
info = {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'id': article_id,
|
||||||
|
'title': article_data['title'],
|
||||||
|
'uploader': article_data.get('author', {}).get('name'),
|
||||||
|
'uploader_id': article_data.get('authorId'),
|
||||||
|
'timestamp': parse_iso8601(article_data.get('createdAt')),
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
'comment_count': int_or_none(article_data.get('commentsCount')),
|
||||||
|
'view_count': int_or_none(article_data.get('hitCount')),
|
||||||
|
}
|
||||||
|
|
||||||
|
video = article_data.get('video')
|
||||||
|
if video:
|
||||||
|
video_type = video['type']
|
||||||
|
if video_type == 'cms.bleacherreport.com':
|
||||||
|
info['url'] = 'http://bleacherreport.com/video_embed?id=%s' % video['id']
|
||||||
|
elif video_type == 'ooyala.com':
|
||||||
|
info['url'] = 'ooyala:%s' % video['id']
|
||||||
|
elif video_type == 'youtube.com':
|
||||||
|
info['url'] = video['id']
|
||||||
|
elif video_type == 'vine.co':
|
||||||
|
info['url'] = 'https://vine.co/v/%s' % video['id']
|
||||||
|
else:
|
||||||
|
info['url'] = video_type + video['id']
|
||||||
|
return info
|
||||||
|
else:
|
||||||
|
raise ExtractorError('no video in the article', expected=True)
|
||||||
|
|
||||||
|
|
||||||
|
class BleacherReportCMSIE(AMPIE):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36})'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
|
||||||
|
'md5': '8c2c12e3af7805152675446c905d159b',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
|
||||||
|
'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
info = self._extract_feed_info('http://cms.bleacherreport.com/media/items/%s/akamai.json' % video_id)
|
||||||
|
info['id'] = video_id
|
||||||
|
return info
|
||||||
@@ -1,278 +0,0 @@
|
|||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
|
|
||||||
from ..compat import (
|
|
||||||
compat_str,
|
|
||||||
compat_urllib_request,
|
|
||||||
compat_urlparse,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
|
||||||
clean_html,
|
|
||||||
int_or_none,
|
|
||||||
parse_iso8601,
|
|
||||||
unescapeHTML,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class BlipTVIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'
|
|
||||||
|
|
||||||
_TESTS = [
|
|
||||||
{
|
|
||||||
'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
|
|
||||||
'md5': 'c6934ad0b6acf2bd920720ec888eb812',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '5779306',
|
|
||||||
'ext': 'mov',
|
|
||||||
'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
|
|
||||||
'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
|
|
||||||
'timestamp': 1323138843,
|
|
||||||
'upload_date': '20111206',
|
|
||||||
'uploader': 'cbr',
|
|
||||||
'uploader_id': '679425',
|
|
||||||
'duration': 81,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
# https://github.com/rg3/youtube-dl/pull/2274
|
|
||||||
'note': 'Video with subtitles',
|
|
||||||
'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
|
|
||||||
'md5': '309f9d25b820b086ca163ffac8031806',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6586561',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Red vs. Blue Season 11 Episode 1',
|
|
||||||
'description': 'One-Zero-One',
|
|
||||||
'timestamp': 1371261608,
|
|
||||||
'upload_date': '20130615',
|
|
||||||
'uploader': 'redvsblue',
|
|
||||||
'uploader_id': '792887',
|
|
||||||
'duration': 279,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
# https://bugzilla.redhat.com/show_bug.cgi?id=967465
|
|
||||||
'url': 'http://a.blip.tv/api.swf#h6Uag5KbVwI',
|
|
||||||
'md5': '314e87b1ebe7a48fcbfdd51b791ce5a6',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6573122',
|
|
||||||
'ext': 'mov',
|
|
||||||
'upload_date': '20130520',
|
|
||||||
'description': 'Two hapless space marines argue over what to do when they realize they have an astronomically huge problem on their hands.',
|
|
||||||
'title': 'Red vs. Blue Season 11 Trailer',
|
|
||||||
'timestamp': 1369029609,
|
|
||||||
'uploader': 'redvsblue',
|
|
||||||
'uploader_id': '792887',
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'url': 'http://blip.tv/play/gbk766dkj4Yn',
|
|
||||||
'md5': 'fe0a33f022d49399a241e84a8ea8b8e3',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1749452',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'upload_date': '20090208',
|
|
||||||
'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.',
|
|
||||||
'title': 'Nostalgia Critic: Transformers',
|
|
||||||
'timestamp': 1234068723,
|
|
||||||
'uploader': 'NostalgiaCritic',
|
|
||||||
'uploader_id': '246467',
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
# https://github.com/rg3/youtube-dl/pull/4404
|
|
||||||
'note': 'Audio only',
|
|
||||||
'url': 'http://blip.tv/hilarios-productions/weekly-manga-recap-kingdom-7119982',
|
|
||||||
'md5': '76c0a56f24e769ceaab21fbb6416a351',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '7103299',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Weekly Manga Recap: Kingdom',
|
|
||||||
'description': 'And then Shin breaks the enemy line, and he's all like HWAH! And then he slices a guy and it's all like FWASHING! And... it's really hard to describe the best parts of this series without breaking down into sound effects, okay?',
|
|
||||||
'timestamp': 1417660321,
|
|
||||||
'upload_date': '20141204',
|
|
||||||
'uploader': 'The Rollo T',
|
|
||||||
'uploader_id': '407429',
|
|
||||||
'duration': 7251,
|
|
||||||
'vcodec': 'none',
|
|
||||||
}
|
|
||||||
},
|
|
||||||
]
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _extract_url(webpage):
|
|
||||||
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
|
|
||||||
if mobj:
|
|
||||||
return 'http://blip.tv/a/a-' + mobj.group(1)
|
|
||||||
mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
|
|
||||||
if mobj:
|
|
||||||
return mobj.group(1)
|
|
||||||
|
|
||||||
def _real_extract(self, url):
    """Extract a single blip.tv video from its RSS flash feed.

    Resolves an optional lookup id to a concrete video id first, then
    parses metadata, formats and subtitle URLs out of the per-video RSS
    document at http://blip.tv/rss/flash/<id>.
    """
    mobj = re.match(self._VALID_URL, url)
    lookup_id = mobj.group('lookup_id')

    # See https://github.com/rg3/youtube-dl/issues/857 and
    # https://github.com/rg3/youtube-dl/issues/4197
    if lookup_id:
        # The /play/<lookup_id> endpoint redirects to a URL whose 'file'
        # query parameter contains a canonical video URL; re-match it to
        # recover the numeric id.
        urlh = self._request_webpage(
            'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id')
        url = compat_urlparse.urlparse(urlh.geturl())
        qs = compat_urlparse.parse_qs(url.query)
        mobj = re.match(self._VALID_URL, qs['file'][0])

    video_id = mobj.group('id')

    rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')

    # Helpers building fully-qualified (Clark-notation) ElementTree tag
    # names for the three XML namespaces used in the feed.
    def blip(s):
        return '{http://blip.tv/dtd/blip/1.0}%s' % s

    def media(s):
        return '{http://search.yahoo.com/mrss/}%s' % s

    def itunes(s):
        return '{http://www.itunes.com/dtds/podcast-1.0.dtd}%s' % s

    item = rss.find('channel/item')

    # Prefer the id reported by the feed itself over the one parsed from
    # the URL (they should agree, but the feed is authoritative here).
    video_id = item.find(blip('item_id')).text
    title = item.find('./title').text
    description = clean_html(compat_str(item.find(blip('puredescription')).text))
    timestamp = parse_iso8601(item.find(blip('datestamp')).text)
    uploader = item.find(blip('user')).text
    uploader_id = item.find(blip('userid')).text
    duration = int(item.find(blip('runtime')).text)
    # Thumbnail: Media-RSS <media:thumbnail url=...> when present,
    # otherwise fall back to the iTunes <itunes:image> element text.
    media_thumbnail = item.find(media('thumbnail'))
    thumbnail = media_thumbnail.get('url') if media_thumbnail is not None else item.find(itunes('image')).text
    categories = [category.text for category in item.findall('category')]

    formats = []
    subtitles_urls = {}

    media_group = item.find(media('group'))
    for media_content in media_group.findall(media('content')):
        url = media_content.get('url')
        role = media_content.get(blip('role'))
        # Each content URL must be resolved through the showplayer
        # endpoint, which answers with a querystring whose 'message'
        # parameter holds the real media URL.
        msg = self._download_webpage(
            url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
            video_id, 'Resolving URL for %s' % role)
        real_url = compat_urlparse.parse_qs(msg.strip())['message'][0]

        media_type = media_content.get('type')
        if media_type == 'text/srt' or url.endswith('.srt'):
            # Subtitle track: derive a language code from the tail of the
            # role attribute (e.g. 'Subtitles-English' -> 'english' -> 'en').
            LANGS = {
                'english': 'en',
            }
            lang = role.rpartition('-')[-1].strip().lower()
            langcode = LANGS.get(lang, lang)
            # NOTE(review): the original (un-resolved) URL is stored here,
            # not real_url — presumably deliberate; _get_subtitles re-fetches
            # it with a custom User-Agent.
            subtitles_urls[langcode] = url
        elif media_type.startswith('video/'):
            formats.append({
                'url': real_url,
                'format_id': role,
                'format_note': media_type,
                # Audio-only feeds omit the vcodec attribute; 'none' tells
                # the format selector there is no video stream.
                'vcodec': media_content.get(blip('vcodec')) or 'none',
                'acodec': media_content.get(blip('acodec')),
                'filesize': media_content.get('filesize'),
                'width': int_or_none(media_content.get('width')),
                'height': int_or_none(media_content.get('height')),
            })
    self._check_formats(formats, video_id)
    self._sort_formats(formats)

    subtitles = self.extract_subtitles(video_id, subtitles_urls)

    return {
        'id': video_id,
        'title': title,
        'description': description,
        'timestamp': timestamp,
        'uploader': uploader,
        'uploader_id': uploader_id,
        'duration': duration,
        'thumbnail': thumbnail,
        'categories': categories,
        'formats': formats,
        'subtitles': subtitles,
    }
def _get_subtitles(self, video_id, subtitles_urls):
|
|
||||||
subtitles = {}
|
|
||||||
for lang, url in subtitles_urls.items():
|
|
||||||
# For some weird reason, blip.tv serves a video instead of subtitles
|
|
||||||
# when we request with a common UA
|
|
||||||
req = compat_urllib_request.Request(url)
|
|
||||||
req.add_header('User-Agent', 'youtube-dl')
|
|
||||||
subtitles[lang] = [{
|
|
||||||
# The extension is 'srt' but it's actually an 'ass' file
|
|
||||||
'ext': 'ass',
|
|
||||||
'data': self._download_webpage(req, None, note=False),
|
|
||||||
}]
|
|
||||||
return subtitles
|
|
||||||
|
|
||||||
|
|
||||||
class BlipTVUserIE(InfoExtractor):
    """Extractor for blip.tv user/channel pages, yielding a playlist of
    all the user's videos discovered via the mobile Ajax episode list."""

    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
    # Number of results the Ajax endpoint returns per page; a short page
    # signals the end of the listing.
    _PAGE_SIZE = 12
    IE_NAME = 'blip.tv:user'
    _TEST = {
        'url': 'http://blip.tv/actone',
        'info_dict': {
            'id': 'actone',
            'title': 'Act One: The Series',
        },
        'playlist_count': 5,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        username = mobj.group(1)

        # %s is filled with the numeric users_id scraped from the page below.
        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, 'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        page_base = page_base % mobj.group(1)
        title = self._og_search_title(page)

        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(
                url, username, 'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(r'href="/([^"]+)"', page):
                # NOTE(review): membership is tested on the raw match but
                # the HTML-unescaped value is appended — an escaped
                # duplicate of an already-seen id could slip through.
                # Confirm whether that is intended before changing it.
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        # Hand each video URL back to BlipTVIE via url_result entries.
        urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return self.playlist_result(
            url_entries, playlist_title=title, playlist_id=username)
@@ -6,9 +6,9 @@ from .common import InfoExtractor
|
|||||||
|
|
||||||
|
|
||||||
class BloombergIE(InfoExtractor):
|
class BloombergIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://www\.bloomberg\.com/news/videos/[^/]+/(?P<id>[^/?#]+)'
|
_VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
|
||||||
|
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
|
'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
|
||||||
# The md5 checksum changes
|
# The md5 checksum changes
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -17,22 +17,38 @@ class BloombergIE(InfoExtractor):
|
|||||||
'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
|
'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
|
||||||
'description': 'md5:a8ba0302912d03d246979735c17d2761',
|
'description': 'md5:a8ba0302912d03d246979735c17d2761',
|
||||||
},
|
},
|
||||||
}
|
'params': {
|
||||||
|
'format': 'best[format_id^=hds]',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
name = self._match_id(url)
|
name = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, name)
|
webpage = self._download_webpage(url, name)
|
||||||
video_id = self._search_regex(r'"bmmrId":"(.+?)"', webpage, 'id')
|
video_id = self._search_regex(
|
||||||
|
r'["\']bmmrId["\']\s*:\s*(["\'])(?P<url>.+?)\1',
|
||||||
|
webpage, 'id', group='url')
|
||||||
title = re.sub(': Video$', '', self._og_search_title(webpage))
|
title = re.sub(': Video$', '', self._og_search_title(webpage))
|
||||||
|
|
||||||
embed_info = self._download_json(
|
embed_info = self._download_json(
|
||||||
'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
|
'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
|
||||||
formats = []
|
formats = []
|
||||||
for stream in embed_info['streams']:
|
for stream in embed_info['streams']:
|
||||||
if stream["muxing_format"] == "TS":
|
stream_url = stream.get('url')
|
||||||
formats.extend(self._extract_m3u8_formats(stream['url'], video_id))
|
if not stream_url:
|
||||||
|
continue
|
||||||
|
if stream['muxing_format'] == 'TS':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||||
else:
|
else:
|
||||||
formats.extend(self._extract_f4m_formats(stream['url'], video_id))
|
formats.extend(self._extract_f4m_formats(
|
||||||
|
stream_url, video_id, f4m_id='hds', fatal=False))
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user