Mirror of https://github.com/ytdl-org/youtube-dl (synced 2025-10-24 17:18:41 +09:00)

Compare commits: 2013.05.04...rtmp_test (1528 commits)
.gitignore (vendored): 10 changed lines

@@ -17,4 +17,12 @@ youtube-dl.tar.gz
.coverage
cover/
updates_key.pem
*.egg-info
*.egg-info
*.srt
*.sbv
*.vtt
*.flv
*.mp4
*.part
test/testdata
.tox
.travis.yml

@@ -3,12 +3,16 @@ python:
  - "2.6"
  - "2.7"
  - "3.3"
before_install:
  - sudo apt-get update -qq
  - sudo apt-get install -qq rtmpdump
script: nosetests test --verbose
notifications:
  email:
    - filippo.valsorda@gmail.com
    - phihag@phihag.de
    - jaime.marquinez.ferrandiz+travis@gmail.com
    - yasoob.khld@gmail.com
# irc:
# channels:
# - "irc.freenode.org#youtube-dl"
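The added before_install step gives the Travis workers rtmpdump before `nosetests test --verbose` runs, so the RTMP download tests can exercise a real rtmpdump binary. A rough local equivalent, assuming a Debian/Ubuntu machine that already has nose installed:

    sudo apt-get update -qq
    sudo apt-get install -qq rtmpdump
    nosetests test --verbose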
Makefile: 21 changed lines

@@ -9,9 +9,19 @@ cleanall: clean
PREFIX=/usr/local
BINDIR=$(PREFIX)/bin
MANDIR=$(PREFIX)/man
SYSCONFDIR=/etc
PYTHON=/usr/bin/env python

# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
ifeq ($(PREFIX),/usr)
SYSCONFDIR=/etc
else
ifeq ($(PREFIX),/usr/local)
SYSCONFDIR=/etc
else
SYSCONFDIR=$(PREFIX)/etc
endif
endif

install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
	install -d $(DESTDIR)$(BINDIR)
	install -m 755 youtube-dl $(DESTDIR)$(BINDIR)

@@ -30,15 +40,15 @@ tar: youtube-dl.tar.gz

pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1

youtube-dl: youtube_dl/*.py
	zip --quiet youtube-dl youtube_dl/*.py
youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
	zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
	echo '#!$(PYTHON)' > youtube-dl
	cat youtube-dl.zip >> youtube-dl
	rm youtube-dl.zip
	chmod a+x youtube-dl

README.md: youtube_dl/*.py
README.md: youtube_dl/*.py youtube_dl/*/*.py
	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py

README.txt: README.md

@@ -47,7 +57,7 @@ README.txt: README.md
youtube-dl.1: README.md
	pandoc -s -f markdown -t man README.md -o youtube-dl.1

youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
	python devscripts/bash-completion.py

bash-completion: youtube-dl.bash-completion

@@ -61,6 +71,7 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
	--exclude '*~' \
	--exclude '__pycache' \
	--exclude '.git' \
	--exclude 'testdata' \
	-- \
	bin devscripts test youtube_dl \
	CHANGELOG LICENSE README.md README.txt \
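The new ifeq block keeps the configuration directory at /etc for the two standard prefixes and derives it from the prefix otherwise. A quick sketch of how SYSCONFDIR resolves for a few invocations (the /opt prefix is only an illustrative example):

    make install PREFIX=/usr              # SYSCONFDIR=/etc
    make install PREFIX=/usr/local        # SYSCONFDIR=/etc (the default PREFIX)
    make install PREFIX=/opt/youtube-dl   # SYSCONFDIR=/opt/youtube-dl/etc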
README.md: 86 changed lines

@@ -1,7 +1,7 @@
% YOUTUBE-DL(1)

# NAME
youtube-dl
youtube-dl - download videos from youtube.com or other video platforms

# SYNOPSIS
**youtube-dl** [OPTIONS] URL [URL...]

@@ -16,22 +16,27 @@ which means you can modify it, redistribute it or use it however you like.
# OPTIONS
-h, --help print this help text and exit
--version print program version and exit
-U, --update update this program to latest version
-i, --ignore-errors continue on download errors
-r, --rate-limit LIMIT maximum download rate (e.g. 50k or 44.6m)
-R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16k)
(default is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By
default, the buffer size is automatically resized
from an initial value of SIZE.
-U, --update update this program to latest version. Make sure
that you have sufficient permissions (run with
sudo if needed)
-i, --ignore-errors continue on download errors, for example to to
skip unavailable videos in a playlist
--abort-on-error Abort downloading of further videos (in the
playlist or the command line) if an error occurs
--dump-user-agent display the current browser identification
--user-agent UA specify a custom user agent
--referer REF specify a custom referer, use if the video access
is restricted to one domain
--list-extractors List all supported extractors and the URLs they
would handle
--extractor-descriptions Output descriptions of all supported extractors
--proxy URL Use the specified HTTP/HTTPS proxy
--no-check-certificate Suppress HTTPS certificate validation.
--cache-dir DIR Location in the filesystem where youtube-dl can
store downloaded information permanently. By
default $XDG_CACHE_HOME/youtube-dl or ~/.cache
/youtube-dl .
--no-cache-dir Disable filesystem caching

## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)

@@ -48,6 +53,20 @@ which means you can modify it, redistribute it or use it however you like.
--date DATE download only videos uploaded in this date
--datebefore DATE download only videos uploaded before this date
--dateafter DATE download only videos uploaded after this date
--no-playlist download only the currently playing video
--age-limit YEARS download only videos suitable for the given age
--download-archive FILE Download only videos not present in the archive
file. Record all downloaded videos in it.

## Download Options:
-r, --rate-limit LIMIT maximum download rate in bytes per second (e.g.
50K or 4.2M)
-R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16K)
(default is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By
default, the buffer size is automatically resized
from an initial value of SIZE.

## Filesystem Options:
-t, --title use title in file name (default)

@@ -59,7 +78,10 @@ which means you can modify it, redistribute it or use it however you like.
%(uploader_id)s for the uploader nickname if
different, %(autonumber)s to get an automatically
incremented number, %(ext)s for the filename
extension, %(upload_date)s for the upload date
extension, %(format)s for the format description
(like "22 - 1280x720" or "HD"),%(format_id)s for
the unique id of the format (like Youtube's
itags: "137"),%(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the video id
, %(playlist)s for the playlist the video is in,

@@ -70,12 +92,14 @@ which means you can modify it, redistribute it or use it however you like.
ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in %(autonumber)s
when it is present in output filename template or
--autonumber option is given
--auto-number option is given
--restrict-filenames Restrict filenames to only ASCII characters, and
avoid "&" and spaces in filenames
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
-w, --no-overwrites do not overwrite files
-c, --continue resume partially downloaded files
-c, --continue force resume of partially downloaded files. By
default, youtube-dl will resume downloads if
possible.
--no-continue do not resume partially downloaded files (restart
from beginning)
--cookies FILE file to read cookies from and dump cookie jar in

@@ -84,6 +108,7 @@ which means you can modify it, redistribute it or use it however you like.
file modification time
--write-description write video description to a .description file
--write-info-json write video metadata to a .info.json file
--write-annotations write video annotations to a .annotation file
--write-thumbnail write thumbnail image to disk

## Verbosity / Simulation Options:

@@ -93,41 +118,49 @@ which means you can modify it, redistribute it or use it however you like.
--skip-download do not download the video
-g, --get-url simulate, quiet but print URL
-e, --get-title simulate, quiet but print title
--get-id simulate, quiet but print id
--get-thumbnail simulate, quiet but print thumbnail URL
--get-description simulate, quiet but print video description
--get-filename simulate, quiet but print output filename
--get-format simulate, quiet but print output format
-j, --dump-json simulate, quiet but print JSON information
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
-v, --verbose print various debugging information
--dump-intermediate-pages print downloaded pages to debug problems(very
verbose)
--write-pages Write downloaded pages to files in the current
directory

## Video Format Options:
-f, --format FORMAT video format code, specifiy the order of
preference using slashes: "-f 22/17/18"
preference using slashes: "-f 22/17/18". "-f mp4"
and "-f flv" are also supported
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific one
is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats (currently youtube
only)
--write-sub write subtitle file (currently youtube only)
--only-sub downloads only the subtitles (no video)

## Subtitle Options:
--write-sub write subtitle file
--write-auto-sub write automatic subtitle file (youtube only)
--all-subs downloads all the available subtitles of the
video (currently youtube only)
video
--list-subs lists all available subtitles for the video
(currently youtube only)
--sub-format LANG subtitle format [srt/sbv] (default=srt)
(currently youtube only)
--sub-lang LANG language of the subtitles to download (optional)
use IETF language tags like 'en'
--sub-format FORMAT subtitle format (default=srt) ([sbv/vtt] youtube
only)
--sub-lang LANGS languages of the subtitles to download (optional)
separated by commas, use IETF language tags like
'en,pt'

## Authentication Options:
-u, --username USERNAME account username
-p, --password PASSWORD account password
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo only)

## Post-processing Options:
-x, --extract-audio convert video files to audio-only files (requires

@@ -143,6 +176,9 @@ which means you can modify it, redistribute it or use it however you like.
processing; the video is erased by default
--no-post-overwrites do not overwrite post-processed files; the post-
processed files are overwritten by default
--embed-subs embed subtitles in the video (only for mp4
videos)
--add-metadata add metadata to the files

# CONFIGURATION

@@ -163,7 +199,7 @@ The `-o` option allows users to indicate a template for the output file names. T
- `playlist`: The name or the id of the playlist that contains the video.
- `playlist_index`: The index of the video in the playlist, a five-digit number.

The current default template is `%(id)s.%(ext)s`, but that will be switchted to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).
The current default template is `%(title)s-%(id)s.%(ext)s`.

In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:

@@ -189,11 +225,11 @@ Examples:

### Can you please put the -b option back?

Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the -b option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you''re interested in. In that case, simply request it with the -f option and youtube-dl will try to download it.
Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.

### I get HTTP error 402 when trying to download a video. What's this?

Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We''re [considering to provide a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a webbrowser to the youtube URL, solving the CAPTCHA, and restart youtube-dl.
Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We're [considering to provide a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a webbrowser to the youtube URL, solving the CAPTCHA, and restart youtube-dl.

### I have downloaded a video but how can I play it?
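The options documented above (format selection, output templates, filename restriction, and comma-separated subtitle languages) combine on a single command line. A small usage sketch; the exact template is only illustrative:

    # Best mp4, filename built from upload date, title and id, ASCII-only names,
    # plus English and Portuguese subtitles:
    youtube-dl -f mp4 \
        -o '%(upload_date)s-%(title)s-%(id)s.%(ext)s' \
        --restrict-filenames \
        --write-sub --sub-lang 'en,pt' \
        URL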
devscripts/bash-completion.in

@@ -1,14 +1,18 @@
__youtube-dl()
__youtube_dl()
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    opts="{{flags}}"
    keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater"

    if [[ ${cur} == * ]] ; then
    if [[ ${cur} =~ : ]]; then
        COMPREPLY=( $(compgen -W "${keywords}" -- ${cur}) )
        return 0
    elif [[ ${cur} == * ]] ; then
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    fi
}

complete -F __youtube-dl youtube-dl
complete -F __youtube_dl youtube-dl
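Besides renaming the function to a valid identifier, the hunk teaches completion about the special :yt keywords. A minimal sketch of trying it out, assuming the youtube-dl.bash-completion file generated by the Makefile target above:

    # Load the generated completion into the current shell
    source youtube-dl.bash-completion

    # "youtube-dl :yt<TAB>" now offers :ytfavorites :ytrecommended
    # :ytsubscriptions :ytwatchlater, and "youtube-dl --<TAB>" offers the flags.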
405
devscripts/buildserver.py
Normal file
405
devscripts/buildserver.py
Normal file
@@ -0,0 +1,405 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
from socketserver import ThreadingMixIn
|
||||
import argparse
|
||||
import ctypes
|
||||
import functools
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
import os.path
|
||||
|
||||
|
||||
class BuildHTTPServer(ThreadingMixIn, HTTPServer):
|
||||
allow_reuse_address = True
|
||||
|
||||
|
||||
advapi32 = ctypes.windll.advapi32
|
||||
|
||||
SC_MANAGER_ALL_ACCESS = 0xf003f
|
||||
SC_MANAGER_CREATE_SERVICE = 0x02
|
||||
SERVICE_WIN32_OWN_PROCESS = 0x10
|
||||
SERVICE_AUTO_START = 0x2
|
||||
SERVICE_ERROR_NORMAL = 0x1
|
||||
DELETE = 0x00010000
|
||||
SERVICE_STATUS_START_PENDING = 0x00000002
|
||||
SERVICE_STATUS_RUNNING = 0x00000004
|
||||
SERVICE_ACCEPT_STOP = 0x1
|
||||
|
||||
SVCNAME = 'youtubedl_builder'
|
||||
|
||||
LPTSTR = ctypes.c_wchar_p
|
||||
START_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.POINTER(LPTSTR))
|
||||
|
||||
|
||||
class SERVICE_TABLE_ENTRY(ctypes.Structure):
|
||||
_fields_ = [
|
||||
('lpServiceName', LPTSTR),
|
||||
('lpServiceProc', START_CALLBACK)
|
||||
]
|
||||
|
||||
|
||||
HandlerEx = ctypes.WINFUNCTYPE(
|
||||
ctypes.c_int, # return
|
||||
ctypes.c_int, # dwControl
|
||||
ctypes.c_int, # dwEventType
|
||||
ctypes.c_void_p, # lpEventData,
|
||||
ctypes.c_void_p, # lpContext,
|
||||
)
|
||||
|
||||
|
||||
def _ctypes_array(c_type, py_array):
|
||||
ar = (c_type * len(py_array))()
|
||||
ar[:] = py_array
|
||||
return ar
|
||||
|
||||
|
||||
def win_OpenSCManager():
|
||||
res = advapi32.OpenSCManagerW(None, None, SC_MANAGER_ALL_ACCESS)
|
||||
if not res:
|
||||
raise Exception('Opening service manager failed - '
|
||||
'are you running this as administrator?')
|
||||
return res
|
||||
|
||||
|
||||
def win_install_service(service_name, cmdline):
|
||||
manager = win_OpenSCManager()
|
||||
try:
|
||||
h = advapi32.CreateServiceW(
|
||||
manager, service_name, None,
|
||||
SC_MANAGER_CREATE_SERVICE, SERVICE_WIN32_OWN_PROCESS,
|
||||
SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
|
||||
cmdline, None, None, None, None, None)
|
||||
if not h:
|
||||
raise OSError('Service creation failed: %s' % ctypes.FormatError())
|
||||
|
||||
advapi32.CloseServiceHandle(h)
|
||||
finally:
|
||||
advapi32.CloseServiceHandle(manager)
|
||||
|
||||
|
||||
def win_uninstall_service(service_name):
|
||||
manager = win_OpenSCManager()
|
||||
try:
|
||||
h = advapi32.OpenServiceW(manager, service_name, DELETE)
|
||||
if not h:
|
||||
raise OSError('Could not find service %s: %s' % (
|
||||
service_name, ctypes.FormatError()))
|
||||
|
||||
try:
|
||||
if not advapi32.DeleteService(h):
|
||||
raise OSError('Deletion failed: %s' % ctypes.FormatError())
|
||||
finally:
|
||||
advapi32.CloseServiceHandle(h)
|
||||
finally:
|
||||
advapi32.CloseServiceHandle(manager)
|
||||
|
||||
|
||||
def win_service_report_event(service_name, msg, is_error=True):
|
||||
with open('C:/sshkeys/log', 'a', encoding='utf-8') as f:
|
||||
f.write(msg + '\n')
|
||||
|
||||
event_log = advapi32.RegisterEventSourceW(None, service_name)
|
||||
if not event_log:
|
||||
raise OSError('Could not report event: %s' % ctypes.FormatError())
|
||||
|
||||
try:
|
||||
type_id = 0x0001 if is_error else 0x0004
|
||||
event_id = 0xc0000000 if is_error else 0x40000000
|
||||
lines = _ctypes_array(LPTSTR, [msg])
|
||||
|
||||
if not advapi32.ReportEventW(
|
||||
event_log, type_id, 0, event_id, None, len(lines), 0,
|
||||
lines, None):
|
||||
raise OSError('Event reporting failed: %s' % ctypes.FormatError())
|
||||
finally:
|
||||
advapi32.DeregisterEventSource(event_log)
|
||||
|
||||
|
||||
def win_service_handler(stop_event, *args):
|
||||
try:
|
||||
raise ValueError('Handler called with args ' + repr(args))
|
||||
TODO
|
||||
except Exception as e:
|
||||
tb = traceback.format_exc()
|
||||
msg = str(e) + '\n' + tb
|
||||
win_service_report_event(service_name, msg, is_error=True)
|
||||
raise
|
||||
|
||||
|
||||
def win_service_set_status(handle, status_code):
|
||||
svcStatus = SERVICE_STATUS()
|
||||
svcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
|
||||
svcStatus.dwCurrentState = status_code
|
||||
svcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP
|
||||
|
||||
svcStatus.dwServiceSpecificExitCode = 0
|
||||
|
||||
if not advapi32.SetServiceStatus(handle, ctypes.byref(svcStatus)):
|
||||
raise OSError('SetServiceStatus failed: %r' % ctypes.FormatError())
|
||||
|
||||
|
||||
def win_service_main(service_name, real_main, argc, argv_raw):
|
||||
try:
|
||||
#args = [argv_raw[i].value for i in range(argc)]
|
||||
stop_event = threading.Event()
|
||||
handler = HandlerEx(functools.partial(stop_event, win_service_handler))
|
||||
h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
|
||||
if not h:
|
||||
raise OSError('Handler registration failed: %s' %
|
||||
ctypes.FormatError())
|
||||
|
||||
TODO
|
||||
except Exception as e:
|
||||
tb = traceback.format_exc()
|
||||
msg = str(e) + '\n' + tb
|
||||
win_service_report_event(service_name, msg, is_error=True)
|
||||
raise
|
||||
|
||||
|
||||
def win_service_start(service_name, real_main):
|
||||
try:
|
||||
cb = START_CALLBACK(
|
||||
functools.partial(win_service_main, service_name, real_main))
|
||||
dispatch_table = _ctypes_array(SERVICE_TABLE_ENTRY, [
|
||||
SERVICE_TABLE_ENTRY(
|
||||
service_name,
|
||||
cb
|
||||
),
|
||||
SERVICE_TABLE_ENTRY(None, ctypes.cast(None, START_CALLBACK))
|
||||
])
|
||||
|
||||
if not advapi32.StartServiceCtrlDispatcherW(dispatch_table):
|
||||
raise OSError('ctypes start failed: %s' % ctypes.FormatError())
|
||||
except Exception as e:
|
||||
tb = traceback.format_exc()
|
||||
msg = str(e) + '\n' + tb
|
||||
win_service_report_event(service_name, msg, is_error=True)
|
||||
raise
|
||||
|
||||
|
||||
def main(args=None):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-i', '--install',
|
||||
action='store_const', dest='action', const='install',
|
||||
help='Launch at Windows startup')
|
||||
parser.add_argument('-u', '--uninstall',
|
||||
action='store_const', dest='action', const='uninstall',
|
||||
help='Remove Windows service')
|
||||
parser.add_argument('-s', '--service',
|
||||
action='store_const', dest='action', const='service',
|
||||
help='Run as a Windows service')
|
||||
parser.add_argument('-b', '--bind', metavar='<host:port>',
|
||||
action='store', default='localhost:8142',
|
||||
help='Bind to host:port (default %default)')
|
||||
options = parser.parse_args(args=args)
|
||||
|
||||
if options.action == 'install':
|
||||
fn = os.path.abspath(__file__).replace('v:', '\\\\vboxsrv\\vbox')
|
||||
cmdline = '%s %s -s -b %s' % (sys.executable, fn, options.bind)
|
||||
win_install_service(SVCNAME, cmdline)
|
||||
return
|
||||
|
||||
if options.action == 'uninstall':
|
||||
win_uninstall_service(SVCNAME)
|
||||
return
|
||||
|
||||
if options.action == 'service':
|
||||
win_service_start(SVCNAME, main)
|
||||
return
|
||||
|
||||
host, port_str = options.bind.split(':')
|
||||
port = int(port_str)
|
||||
|
||||
print('Listening on %s:%d' % (host, port))
|
||||
srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
|
||||
thr = threading.Thread(target=srv.serve_forever)
|
||||
thr.start()
|
||||
input('Press ENTER to shut down')
|
||||
srv.shutdown()
|
||||
thr.join()
|
||||
|
||||
|
||||
def rmtree(path):
|
||||
for name in os.listdir(path):
|
||||
fname = os.path.join(path, name)
|
||||
        if os.path.isdir(fname):
            rmtree(fname)
        else:
            os.chmod(fname, 0o666)
            os.remove(fname)
    os.rmdir(path)

#==============================================================================

class BuildError(Exception):
    def __init__(self, output, code=500):
        self.output = output
        self.code = code

    def __str__(self):
        return self.output


class HTTPError(BuildError):
    pass


class PythonBuilder(object):
    def __init__(self, **kwargs):
        pythonVersion = kwargs.pop('python', '2.7')
        try:
            key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Python\PythonCore\%s\InstallPath' % pythonVersion)
            try:
                self.pythonPath, _ = _winreg.QueryValueEx(key, '')
            finally:
                _winreg.CloseKey(key)
        except Exception:
            raise BuildError('No such Python version: %s' % pythonVersion)

        super(PythonBuilder, self).__init__(**kwargs)


class GITInfoBuilder(object):
    def __init__(self, **kwargs):
        try:
            self.user, self.repoName = kwargs['path'][:2]
            self.rev = kwargs.pop('rev')
        except ValueError:
            raise BuildError('Invalid path')
        except KeyError as e:
            raise BuildError('Missing mandatory parameter "%s"' % e.args[0])

        path = os.path.join(os.environ['APPDATA'], 'Build archive', self.repoName, self.user)
        if not os.path.exists(path):
            os.makedirs(path)
        self.basePath = tempfile.mkdtemp(dir=path)
        self.buildPath = os.path.join(self.basePath, 'build')

        super(GITInfoBuilder, self).__init__(**kwargs)


class GITBuilder(GITInfoBuilder):
    def build(self):
        try:
            subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
            subprocess.check_output(['git', 'checkout', self.rev], cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)

        super(GITBuilder, self).build()


class YoutubeDLBuilder(object):
    authorizedUsers = ['fraca7', 'phihag', 'rg3', 'FiloSottile']

    def __init__(self, **kwargs):
        if self.repoName != 'youtube-dl':
            raise BuildError('Invalid repository "%s"' % self.repoName)
        if self.user not in self.authorizedUsers:
            raise HTTPError('Unauthorized user "%s"' % self.user, 401)

        super(YoutubeDLBuilder, self).__init__(**kwargs)

    def build(self):
        try:
            subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
                                    cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)

        super(YoutubeDLBuilder, self).build()


class DownloadBuilder(object):
    def __init__(self, **kwargs):
        self.handler = kwargs.pop('handler')
        self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
        self.srcPath = os.path.abspath(os.path.normpath(self.srcPath))
        if not self.srcPath.startswith(self.buildPath):
            raise HTTPError(self.srcPath, 401)

        super(DownloadBuilder, self).__init__(**kwargs)

    def build(self):
        if not os.path.exists(self.srcPath):
            raise HTTPError('No such file', 404)
        if os.path.isdir(self.srcPath):
            raise HTTPError('Is a directory: %s' % self.srcPath, 401)

        self.handler.send_response(200)
        self.handler.send_header('Content-Type', 'application/octet-stream')
        self.handler.send_header('Content-Disposition', 'attachment; filename=%s' % os.path.split(self.srcPath)[-1])
        self.handler.send_header('Content-Length', str(os.stat(self.srcPath).st_size))
        self.handler.end_headers()

        with open(self.srcPath, 'rb') as src:
            shutil.copyfileobj(src, self.handler.wfile)

        super(DownloadBuilder, self).build()


class CleanupTempDir(object):
    def build(self):
        try:
            rmtree(self.basePath)
        except Exception as e:
            print('WARNING deleting "%s": %s' % (self.basePath, e))

        super(CleanupTempDir, self).build()


class Null(object):
    def __init__(self, **kwargs):
        pass

    def start(self):
        pass

    def close(self):
        pass

    def build(self):
        pass


class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, CleanupTempDir, Null):
    pass


class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.

    def do_GET(self):
        path = urlparse.urlparse(self.path)
        paramDict = dict([(key, value[0]) for key, value in urlparse.parse_qs(path.query).items()])
        action, _, path = path.path.strip('/').partition('/')
        if path:
            path = path.split('/')
            if action in self.actionDict:
                try:
                    builder = self.actionDict[action](path=path, handler=self, **paramDict)
                    builder.start()
                    try:
                        builder.build()
                    finally:
                        builder.close()
                except BuildError as e:
                    self.send_response(e.code)
                    msg = unicode(e).encode('UTF-8')
                    self.send_header('Content-Type', 'text/plain; charset=UTF-8')
                    self.send_header('Content-Length', len(msg))
                    self.end_headers()
                    self.wfile.write(msg)
                except HTTPError as e:
                    self.send_response(e.code, str(e))
            else:
                self.send_response(500, 'Unknown build method "%s"' % action)
        else:
            self.send_response(500, 'Malformed URL')

#==============================================================================

if __name__ == '__main__':
    main()
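Note: the Builder class above composes its behaviour through cooperative multiple inheritance; each mixin performs its own step and then defers to super(), so a single build() call walks the whole MRO in declaration order, with Null terminating the chain. A minimal sketch of the pattern (illustrative only; the class names below are invented):

class Base(object):
    def build(self):
        pass  # end of the cooperative chain

class CloneStep(Base):
    def build(self):
        print('clone sources')
        super(CloneStep, self).build()

class CompileStep(Base):
    def build(self):
        print('run py2exe')
        super(CompileStep, self).build()

class DemoBuilder(CloneStep, CompileStep):
    pass

DemoBuilder().build()  # prints 'clone sources' then 'run py2exe'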
devscripts/check-porn.py (new file, 39 lines)
@@ -0,0 +1,39 @@
#!/usr/bin/env python

"""
This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
if we are not 'age_limit' tagging some porn site
"""

# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import get_testcases
from youtube_dl.utils import compat_urllib_request

for test in get_testcases():
    try:
        webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
    except:
        print('\nFail: {0}'.format(test['name']))
        continue

    webpage = webpage.decode('utf8', 'replace')

    if 'porn' in webpage.lower() and ('info_dict' not in test
                                      or 'age_limit' not in test['info_dict']
                                      or test['info_dict']['age_limit'] != 18):
        print('\nPotential missing age_limit check: {0}'.format(test['name']))

    elif 'porn' not in webpage.lower() and ('info_dict' in test and
                                            'age_limit' in test['info_dict'] and
                                            test['info_dict']['age_limit'] == 18):
        print('\nPotential false negative: {0}'.format(test['name']))

    else:
        sys.stdout.write('.')
    sys.stdout.flush()

print()
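The heuristic above boils down to three outcomes per test case: a page containing 'porn' without an age_limit of 18, a page without 'porn' that is nevertheless tagged 18, and everything else. Roughly the same decision as a standalone function (a hypothetical helper, assuming the test-case dict shape used above):

def classify(webpage, test):
    # Mirrors the branches above: 'porn' occurrence vs. declared age_limit.
    flagged = 'porn' in webpage.lower()
    age_limit = test.get('info_dict', {}).get('age_limit')
    if flagged and age_limit != 18:
        return 'potential missing age_limit check'
    if not flagged and age_limit == 18:
        return 'potential false negative'
    return 'ok'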
@@ -3,31 +3,40 @@
import json
import sys
import hashlib
import urllib.request
import os.path


if len(sys.argv) <= 1:
    print('Specify the version number as parameter')
    sys.exit()
    print('Specify the version number as parameter')
    sys.exit()
version = sys.argv[1]

with open('update/LATEST_VERSION', 'w') as f:
    f.write(version)
    f.write(version)

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
    del versions_info['signature']
    del versions_info['signature']

new_version = {}

filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version}
filenames = {
    'bin': 'youtube-dl',
    'exe': 'youtube-dl.exe',
    'tar': 'youtube-dl-%s.tar.gz' % version}
build_dir = os.path.join('..', '..', 'build', version)
for key, filename in filenames.items():
    print('Downloading and checksumming %s...' %filename)
    url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename)
    data = urllib.request.urlopen(url).read()
    sha256sum = hashlib.sha256(data).hexdigest()
    new_version[key] = (url, sha256sum)
    url = 'https://yt-dl.org/downloads/%s/%s' % (version, filename)
    fn = os.path.join(build_dir, filename)
    with open(fn, 'rb') as f:
        data = f.read()
    if not data:
        raise ValueError('File %s is empty!' % fn)
    sha256sum = hashlib.sha256(data).hexdigest()
    new_version[key] = (url, sha256sum)

versions_info['versions'][version] = new_version
versions_info['latest'] = version

json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
with open('update/versions.json', 'w') as jsonf:
    json.dump(versions_info, jsonf, indent=4, sort_keys=True)
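The hunk above switches the version script from re-downloading each release file to checksumming the local build artifacts with hashlib.sha256 before recording them in update/versions.json. It reads each file in one go; for large artifacts the same digest can be computed in chunks (a sketch, not part of the script):

import hashlib

def sha256_of_file(path, chunk_size=1024 * 1024):
    # Same digest as hashlib.sha256(open(path, 'rb').read()).hexdigest(),
    # but without loading the whole artifact into memory.
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()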
@@ -22,7 +22,7 @@ entry_template=textwrap.dedent("""
    <atom:link href="http://rg3.github.io/youtube-dl" />
    <atom:content type="xhtml">
        <div xmlns="http://www.w3.org/1999/xhtml">
            Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
            Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
        </div>
    </atom:content>
    <atom:author>
@@ -54,4 +54,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
with open('update/releases.atom','w',encoding='utf-8') as atom_file:
    atom_file.write(atom_template)

devscripts/gh-pages/update-sites.py (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/usr/bin/env python3

import sys
import os
import textwrap

# We must be able to import youtube_dl
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import youtube_dl

def main():
    with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
        template = tmplf.read()

    ie_htmls = []
    for ie in sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()):
        ie_html = '<b>{}</b>'.format(ie.IE_NAME)
        ie_desc = getattr(ie, 'IE_DESC', None)
        if ie_desc is False:
            continue
        elif ie_desc is not None:
            ie_html += ': {}'.format(ie.IE_DESC)
        if ie.working() == False:
            ie_html += ' (Currently broken)'
        ie_htmls.append('<li>{}</li>'.format(ie_html))

    template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))

    with open('supportedsites.html', 'w', encoding='utf-8') as sitesf:
        sitesf.write(template)

if __name__ == '__main__':
    main()
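update-sites.py treats IE_DESC in three ways: False hides the extractor from the page, None lists only the name, and a string is appended as a description, with broken extractors marked. The mapping in isolation (illustrative sketch only):

def format_entry(name, desc, working=True):
    # Mirrors the IE_DESC handling above: False -> skip, None -> name only,
    # string -> name plus description; broken extractors get a suffix.
    if desc is False:
        return None
    html = '<b>{}</b>'.format(name)
    if desc is not None:
        html += ': {}'.format(desc)
    if not working:
        html += ' (Currently broken)'
    return '<li>{}</li>'.format(html)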
@@ -14,6 +14,12 @@

set -e

skip_tests=false
if [ "$1" = '--skip-test' ]; then
    skip_tests=true
    shift
fi

if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
version="$1"
if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
@@ -22,7 +28,11 @@ if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit

/bin/echo -e "\n### First of all, testing..."
make cleanall
nosetests --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
if $skip_tests ; then
    echo 'SKIPPING TESTS'
else
    nosetests --verbose --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
fi

/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
@@ -45,8 +55,8 @@ git push origin "$version"
/bin/echo -e "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
read -p "VM running? (y/n) " -n 1
wget "http://localhost:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "build/$version"
mv youtube-dl youtube-dl.exe "build/$version"
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
@@ -57,9 +67,11 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe

/bin/echo -e "\n### Signing and uploading the new binaries to youtube-dl.org..."
/bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
scp -r "build/$version" ytdl@youtube-dl.org:html/downloads/
scp -r "build/$version" ytdl@yt-dl.org:html/tmp/
ssh ytdl@yt-dl.org "mv html/tmp/$version html/downloads/"
ssh ytdl@yt-dl.org "sh html/update_latest.sh $version"

/bin/echo -e "\n### Now switching to gh-pages..."
git clone --branch gh-pages --single-branch . build/gh-pages
@@ -73,12 +85,9 @@ ROOT=$(pwd)
"$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
"$ROOT/devscripts/gh-pages/generate-download.py"
"$ROOT/devscripts/gh-pages/update-copyright.py"
"$ROOT/devscripts/gh-pages/update-sites.py"
git add *.html *.html.in update
git commit -m "release $version"
git show HEAD
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
git push "$ROOT" gh-pages
git push "$ORIGIN_URL" gh-pages
)
setup.py (54 changed lines)
@@ -2,17 +2,21 @@
# -*- coding: utf-8 -*-

from __future__ import print_function

import pkg_resources
import sys

try:
    from setuptools import setup
    setuptools_available = True
except ImportError:
    from distutils.core import setup
    setuptools_available = False

try:
    # This will create an exe that needs Microsoft Visual C++ 2008
    # Redistributable Package
    import py2exe
    """This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
except ImportError:
    if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
        print("Cannot import py2exe", file=sys.stderr)
@@ -23,15 +27,17 @@ py2exe_options = {
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe']
    "dll_excludes": ['w9xpopen.exe'],
}

py2exe_console = [{
    "script": "./youtube_dl/__main__.py",
    "dest_base": "youtube-dl",
}]

py2exe_params = {
    'console': py2exe_console,
    'options': { "py2exe": py2exe_options },
    'options': {"py2exe": py2exe_options},
    'zipfile': None
}

@@ -39,31 +45,39 @@ if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
    params = py2exe_params
else:
    params = {
        'scripts': ['bin/youtube-dl'],
        'data_files': [('etc/bash_completion.d', ['youtube-dl.bash-completion']), # Installing system-wide would require sudo...
                       ('share/doc/youtube_dl', ['README.txt']),
                       ('share/man/man1/', ['youtube-dl.1'])]
        'data_files': [ # Installing system-wide would require sudo...
            ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
            ('share/doc/youtube_dl', ['README.txt']),
            ('share/man/man1', ['youtube-dl.1'])
        ]
    }
    if setuptools_available:
        params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
    else:
        params['scripts'] = ['bin/youtube-dl']

# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec'))
exec(compile(open('youtube_dl/version.py').read(),
     'youtube_dl/version.py', 'exec'))

setup(
    name = 'youtube_dl',
    version = __version__,
    description = 'YouTube video downloader',
    long_description = 'Small command-line program to download videos from YouTube.com and other video sites.',
    url = 'https://github.com/rg3/youtube-dl',
    author = 'Ricardo Garcia',
    maintainer = 'Philipp Hagemeister',
    maintainer_email = 'phihag@phihag.de',
    packages = ['youtube_dl'],
    name='youtube_dl',
    version=__version__,
    description='YouTube video downloader',
    long_description='Small command-line program to download videos from'
                     ' YouTube.com and other video sites.',
    url='https://github.com/rg3/youtube-dl',
    author='Ricardo Garcia',
    author_email='ytdl@yt-dl.org',
    maintainer='Philipp Hagemeister',
    maintainer_email='phihag@phihag.de',
    packages=['youtube_dl', 'youtube_dl.extractor'],

    # Provokes warning on most systems (why?!)
    #test_suite = 'nose.collector',
    #test_requires = ['nosetest'],
    # test_suite = 'nose.collector',
    # test_requires = ['nosetest'],

    classifiers = [
    classifiers=[
        "Topic :: Multimedia :: Video",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",

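The exec(compile(...)) line above reads __version__ out of youtube_dl/version.py without importing the package, which keeps setup.py working before any dependencies are installed. The same trick as a standalone helper (a sketch, hypothetical function name):

def read_version(path='youtube_dl/version.py'):
    # Executes only the version module in an isolated namespace and returns
    # the __version__ it defines, without importing the whole package.
    namespace = {}
    with open(path) as f:
        exec(compile(f.read(), path, 'exec'), namespace)
    return namespace['__version__']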
test/__init__.py (new empty file)
test/helper.py (new file, 85 lines)
@@ -0,0 +1,85 @@
import errno
import io
import hashlib
import json
import os.path
import re
import types
import sys

import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.utils import preferredencoding


def get_params(override=None):
    PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   "parameters.json")
    with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
        parameters = json.load(pf)
    if override:
        parameters.update(override)
    return parameters


def try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as ose:
        if ose.errno != errno.ENOENT:
            raise


def report_warning(message):
    '''
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
    if sys.stderr.isatty() and os.name != 'nt':
        _msg_header = u'\033[0;33mWARNING:\033[0m'
    else:
        _msg_header = u'WARNING:'
    output = u'%s %s\n' % (_msg_header, message)
    if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
        output = output.encode(preferredencoding())
    sys.stderr.write(output)


class FakeYDL(YoutubeDL):
    def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary
        # some test set the "sublang" parameter, which would break the md5 checks.
        params = get_params(override=override)
        super(FakeYDL, self).__init__(params)
        self.result = []

    def to_screen(self, s, skip_eol=None):
        print(s)

    def trouble(self, s, tb=None):
        raise Exception(s)

    def download(self, x):
        self.result.append(x)

    def expect_warning(self, regex):
        # Silence an expected warning matching a regex
        old_report_warning = self.report_warning
        def report_warning(self, message):
            if re.match(regex, message): return
            old_report_warning(message)
        self.report_warning = types.MethodType(report_warning, self)


def get_testcases():
    for ie in youtube_dl.extractor.gen_extractors():
        t = getattr(ie, '_TEST', None)
        if t:
            t['name'] = type(ie).__name__[:-len('IE')]
            yield t
        for t in getattr(ie, '_TESTS', []):
            t['name'] = type(ie).__name__[:-len('IE')]
            yield t


md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
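FakeYDL.expect_warning above silences a single expected warning by wrapping report_warning and rebinding the wrapper onto the instance with types.MethodType, leaving every other warning fatal. The binding trick in isolation (a sketch with invented names):

import re
import types

class Reporter(object):
    def report_warning(self, message):
        raise Exception(message)

def silence_matching(obj, regex):
    # Wrap the existing method and rebind the wrapper onto this instance only.
    old = obj.report_warning
    def report_warning(self, message):
        if re.match(regex, message):
            return
        old(message)
    obj.report_warning = types.MethodType(report_warning, obj)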
@@ -38,7 +38,6 @@
    "writedescription": false,
    "writeinfojson": true,
    "writesubtitles": false,
    "onlysubtitles": false,
    "allsubtitles": false,
    "listssubtitles": false
}
test/test_YoutubeDL.py (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import FakeYDL
|
||||
|
||||
|
||||
class YDL(FakeYDL):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(YDL, self).__init__(*args, **kwargs)
|
||||
self.downloaded_info_dicts = []
|
||||
self.msgs = []
|
||||
|
||||
def process_info(self, info_dict):
|
||||
self.downloaded_info_dicts.append(info_dict)
|
||||
|
||||
def to_screen(self, msg):
|
||||
self.msgs.append(msg)
|
||||
|
||||
|
||||
class TestFormatSelection(unittest.TestCase):
|
||||
def test_prefer_free_formats(self):
|
||||
# Same resolution => download webm
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = True
|
||||
formats = [
|
||||
{u'ext': u'webm', u'height': 460},
|
||||
{u'ext': u'mp4', u'height': 460},
|
||||
]
|
||||
info_dict = {u'formats': formats, u'extractor': u'test'}
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'webm')
|
||||
|
||||
# Different resolution => download best quality (mp4)
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = True
|
||||
formats = [
|
||||
{u'ext': u'webm', u'height': 720},
|
||||
{u'ext': u'mp4', u'height': 1080},
|
||||
]
|
||||
info_dict[u'formats'] = formats
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'mp4')
|
||||
|
||||
# No prefer_free_formats => keep original formats order
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = False
|
||||
formats = [
|
||||
{u'ext': u'webm', u'height': 720},
|
||||
{u'ext': u'flv', u'height': 720},
|
||||
]
|
||||
info_dict[u'formats'] = formats
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'flv')
|
||||
|
||||
def test_format_limit(self):
|
||||
formats = [
|
||||
{u'format_id': u'meh', u'url': u'http://example.com/meh'},
|
||||
{u'format_id': u'good', u'url': u'http://example.com/good'},
|
||||
{u'format_id': u'great', u'url': u'http://example.com/great'},
|
||||
{u'format_id': u'excellent', u'url': u'http://example.com/exc'},
|
||||
]
|
||||
info_dict = {
|
||||
u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
|
||||
|
||||
ydl = YDL()
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'format_id'], u'excellent')
|
||||
|
||||
ydl = YDL({'format_limit': 'good'})
|
||||
assert ydl.params['format_limit'] == 'good'
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'format_id'], u'good')
|
||||
|
||||
ydl = YDL({'format_limit': 'great', 'format': 'all'})
|
||||
ydl.process_ie_result(info_dict)
|
||||
self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
|
||||
self.assertTrue('3' in ydl.msgs[0])
|
||||
|
||||
ydl = YDL()
|
||||
ydl.params['format_limit'] = 'excellent'
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'format_id'], u'excellent')
|
||||
|
||||
def test_format_selection(self):
|
||||
formats = [
|
||||
{u'format_id': u'35', u'ext': u'mp4'},
|
||||
{u'format_id': u'45', u'ext': u'webm'},
|
||||
{u'format_id': u'47', u'ext': u'webm'},
|
||||
{u'format_id': u'2', u'ext': u'flv'},
|
||||
]
|
||||
info_dict = {u'formats': formats, u'extractor': u'test'}
|
||||
|
||||
ydl = YDL({'format': u'20/47'})
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'47')
|
||||
|
||||
ydl = YDL({'format': u'20/71/worst'})
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'35')
|
||||
|
||||
ydl = YDL()
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'2')
|
||||
|
||||
ydl = YDL({'format': u'webm/mp4'})
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'47')
|
||||
|
||||
ydl = YDL({'format': u'3gp/40/mp4'})
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'35')
|
||||
|
||||
def test_add_extra_info(self):
|
||||
test_dict = {
|
||||
'extractor': 'Foo',
|
||||
}
|
||||
extra_info = {
|
||||
'extractor': 'Bar',
|
||||
'playlist': 'funny videos',
|
||||
}
|
||||
YDL.add_extra_info(test_dict, extra_info)
|
||||
self.assertEqual(test_dict['extractor'], 'Foo')
|
||||
self.assertEqual(test_dict['playlist'], 'funny videos')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
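The format strings exercised above ('20/47', 'webm/mp4', '3gp/40/mp4') are slash-separated preference lists: each token is tried left to right and may be a format_id, an extension, or a keyword such as worst or best. A simplified resolver covering only these cases (a sketch of the idea, not YoutubeDL's actual selector):

def pick_format(format_spec, formats):
    # formats: list of dicts with 'format_id' and 'ext', ordered worst to best.
    for token in format_spec.split('/'):
        if token == 'worst':
            return formats[0]
        if token == 'best':
            return formats[-1]
        matches = [f for f in formats
                   if f.get('format_id') == token or f.get('ext') == token]
        if matches:
            return matches[-1]  # best of the matching formats
    return None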
test/test_age_restriction.py (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import try_rm
|
||||
|
||||
|
||||
from youtube_dl import YoutubeDL
|
||||
|
||||
|
||||
def _download_restricted(url, filename, age):
|
||||
""" Returns true iff the file has been downloaded """
|
||||
|
||||
params = {
|
||||
'age_limit': age,
|
||||
'skip_download': True,
|
||||
'writeinfojson': True,
|
||||
"outtmpl": "%(id)s.%(ext)s",
|
||||
}
|
||||
ydl = YoutubeDL(params)
|
||||
ydl.add_default_info_extractors()
|
||||
json_filename = os.path.splitext(filename)[0] + '.info.json'
|
||||
try_rm(json_filename)
|
||||
ydl.download([url])
|
||||
res = os.path.exists(json_filename)
|
||||
try_rm(json_filename)
|
||||
return res
|
||||
|
||||
|
||||
class TestAgeRestriction(unittest.TestCase):
|
||||
def _assert_restricted(self, url, filename, age, old_age=None):
|
||||
self.assertTrue(_download_restricted(url, filename, old_age))
|
||||
self.assertFalse(_download_restricted(url, filename, age))
|
||||
|
||||
def test_youtube(self):
|
||||
self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10)
|
||||
|
||||
def test_youporn(self):
|
||||
self._assert_restricted(
|
||||
'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
|
||||
'505835.mp4', 2, old_age=25)
|
||||
|
||||
def test_pornotube(self):
|
||||
self._assert_restricted(
|
||||
'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
|
||||
'1689755.flv', 13)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -1,38 +1,111 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
from test.helper import get_testcases
|
||||
|
||||
from youtube_dl.extractor import (
|
||||
gen_extractors,
|
||||
JustinTVIE,
|
||||
YoutubeIE,
|
||||
)
|
||||
|
||||
from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE
|
||||
|
||||
class TestAllURLsMatching(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.ies = gen_extractors()
|
||||
|
||||
def matching_ies(self, url):
|
||||
return [ie.IE_NAME for ie in self.ies if ie.suitable(url) and ie.IE_NAME != 'generic']
|
||||
|
||||
def assertMatch(self, url, ie_list):
|
||||
self.assertEqual(self.matching_ies(url), ie_list)
|
||||
|
||||
def test_youtube_playlist_matching(self):
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'UUBABnxM4Ar9ten8Mdjj1j0Q')) #585
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'PL63F0C78739B09958'))
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q'))
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC'))
|
||||
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
|
||||
self.assertFalse(YoutubePlaylistIE.suitable(u'PLtS2H6bU1M'))
|
||||
assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
|
||||
assertPlaylist(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
|
||||
assertPlaylist(u'UUBABnxM4Ar9ten8Mdjj1j0Q') #585
|
||||
assertPlaylist(u'PL63F0C78739B09958')
|
||||
assertPlaylist(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
|
||||
assertPlaylist(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
|
||||
assertPlaylist(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
|
||||
assertPlaylist(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
|
||||
self.assertFalse('youtube:playlist' in self.matching_ies(u'PLtS2H6bU1M'))
|
||||
|
||||
def test_youtube_matching(self):
|
||||
self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
|
||||
self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
|
||||
self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
|
||||
self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
|
||||
self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
|
||||
|
||||
def test_youtube_channel_matching(self):
|
||||
self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM'))
|
||||
self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec'))
|
||||
self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM/videos'))
|
||||
assertChannel = lambda url: self.assertMatch(url, ['youtube:channel'])
|
||||
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM')
|
||||
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')
|
||||
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
|
||||
|
||||
def test_youtube_user_matching(self):
|
||||
self.assertMatch('www.youtube.com/NASAgovVideo/videos', ['youtube:user'])
|
||||
|
||||
def test_youtube_feeds(self):
|
||||
self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watch_later'])
|
||||
self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:subscriptions'])
|
||||
self.assertMatch('https://www.youtube.com/feed/recommended', ['youtube:recommended'])
|
||||
self.assertMatch('https://www.youtube.com/my_favorites', ['youtube:favorites'])
|
||||
|
||||
def test_youtube_show_matching(self):
|
||||
self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])
|
||||
|
||||
def test_justin_tv_channelid_matching(self):
|
||||
self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"www.justin.tv/vanillatv"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"www.twitch.tv/vanillatv"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv/"))
|
||||
self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/"))
|
||||
|
||||
def test_justintv_videoid_matching(self):
|
||||
self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/b/328087483"))
|
||||
|
||||
def test_justin_tv_chapterid_matching(self):
|
||||
self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))
|
||||
|
||||
def test_youtube_extract(self):
|
||||
self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
|
||||
self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
|
||||
self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc')
|
||||
assertExtractId = lambda url, id: self.assertEqual(YoutubeIE()._extract_id(url), id)
|
||||
assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
|
||||
assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
|
||||
|
||||
def test_no_duplicates(self):
|
||||
ies = gen_extractors()
|
||||
for tc in get_testcases():
|
||||
url = tc['url']
|
||||
for ie in ies:
|
||||
if type(ie).__name__ in ['GenericIE', tc['name'] + 'IE']:
|
||||
self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
|
||||
else:
|
||||
self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))
|
||||
|
||||
def test_keywords(self):
|
||||
self.assertMatch(':ytsubs', ['youtube:subscriptions'])
|
||||
self.assertMatch(':ytsubscriptions', ['youtube:subscriptions'])
|
||||
self.assertMatch(':ythistory', ['youtube:history'])
|
||||
self.assertMatch(':thedailyshow', ['ComedyCentralShows'])
|
||||
self.assertMatch(':tds', ['ComedyCentralShows'])
|
||||
self.assertMatch(':colbertreport', ['ComedyCentralShows'])
|
||||
self.assertMatch(':cr', ['ComedyCentralShows'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -1,139 +1,175 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import errno
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import (
|
||||
get_params,
|
||||
get_testcases,
|
||||
try_rm,
|
||||
md5,
|
||||
report_warning
|
||||
)
|
||||
|
||||
|
||||
import hashlib
|
||||
import io
|
||||
import os
|
||||
import json
|
||||
import unittest
|
||||
import sys
|
||||
import hashlib
|
||||
import socket
|
||||
|
||||
# Allow direct execution
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
import youtube_dl.FileDownloader
|
||||
import youtube_dl.InfoExtractors
|
||||
from youtube_dl.utils import *
|
||||
|
||||
DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
|
||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
|
||||
import youtube_dl.YoutubeDL
|
||||
from youtube_dl.utils import (
|
||||
compat_str,
|
||||
compat_urllib_error,
|
||||
compat_HTTPError,
|
||||
DownloadError,
|
||||
ExtractorError,
|
||||
UnavailableVideoError,
|
||||
)
|
||||
from youtube_dl.extractor import get_info_extractor
|
||||
|
||||
RETRIES = 3
|
||||
|
||||
# General configuration (from __init__, not very elegant...)
|
||||
jar = compat_cookiejar.CookieJar()
|
||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
|
||||
proxy_handler = compat_urllib_request.ProxyHandler()
|
||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
|
||||
compat_urllib_request.install_opener(opener)
|
||||
socket.setdefaulttimeout(10)
|
||||
|
||||
def _try_rm(filename):
|
||||
""" Remove a file if it exists """
|
||||
try:
|
||||
os.remove(filename)
|
||||
except OSError as ose:
|
||||
if ose.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
class FileDownloader(youtube_dl.FileDownloader):
|
||||
class YoutubeDL(youtube_dl.YoutubeDL):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.to_stderr = self.to_screen
|
||||
self.processed_info_dicts = []
|
||||
return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
|
||||
super(YoutubeDL, self).__init__(*args, **kwargs)
|
||||
def report_warning(self, message):
|
||||
# Don't accept warnings during tests
|
||||
raise ExtractorError(message)
|
||||
def process_info(self, info_dict):
|
||||
self.processed_info_dicts.append(info_dict)
|
||||
return youtube_dl.FileDownloader.process_info(self, info_dict)
|
||||
return super(YoutubeDL, self).process_info(info_dict)
|
||||
|
||||
def _file_md5(fn):
|
||||
with open(fn, 'rb') as f:
|
||||
return hashlib.md5(f.read()).hexdigest()
|
||||
|
||||
with io.open(DEF_FILE, encoding='utf-8') as deff:
|
||||
defs = json.load(deff)
|
||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
parameters = json.load(pf)
|
||||
defs = get_testcases()
|
||||
|
||||
|
||||
class TestDownload(unittest.TestCase):
|
||||
maxDiff = None
|
||||
def setUp(self):
|
||||
self.parameters = parameters
|
||||
self.defs = defs
|
||||
|
||||
### Dynamically generate tests
|
||||
def generator(test_case):
|
||||
|
||||
def test_template(self):
|
||||
ie = youtube_dl.InfoExtractors.get_info_extractor(test_case['name'])
|
||||
if not ie._WORKING:
|
||||
print('Skipping: IE marked as not _WORKING')
|
||||
return
|
||||
if 'playlist' not in test_case and not test_case['file']:
|
||||
print('Skipping: No output file specified')
|
||||
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
|
||||
other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
|
||||
def print_skipping(reason):
|
||||
print('Skipping %s: %s' % (test_case['name'], reason))
|
||||
if not ie.working():
|
||||
print_skipping('IE marked as not _WORKING')
|
||||
return
|
||||
if 'playlist' not in test_case:
|
||||
info_dict = test_case.get('info_dict', {})
|
||||
if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
|
||||
print_skipping('The output file cannot be know, the "file" '
|
||||
'key is missing or the info_dict is incomplete')
|
||||
return
|
||||
if 'skip' in test_case:
|
||||
print('Skipping: {0}'.format(test_case['skip']))
|
||||
print_skipping(test_case['skip'])
|
||||
return
|
||||
for other_ie in other_ies:
|
||||
if not other_ie.working():
|
||||
print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
|
||||
return
|
||||
|
||||
params = self.parameters.copy()
|
||||
params.update(test_case.get('params', {}))
|
||||
params = get_params(test_case.get('params', {}))
|
||||
|
||||
fd = FileDownloader(params)
|
||||
for ie in youtube_dl.InfoExtractors.gen_extractors():
|
||||
fd.add_info_extractor(ie)
|
||||
ydl = YoutubeDL(params)
|
||||
ydl.add_default_info_extractors()
|
||||
finished_hook_called = set()
|
||||
def _hook(status):
|
||||
if status['status'] == 'finished':
|
||||
finished_hook_called.add(status['filename'])
|
||||
fd.add_progress_hook(_hook)
|
||||
ydl.fd.add_progress_hook(_hook)
|
||||
|
||||
def get_tc_filename(tc):
|
||||
return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
|
||||
|
||||
test_cases = test_case.get('playlist', [test_case])
|
||||
for tc in test_cases:
|
||||
_try_rm(tc['file'])
|
||||
_try_rm(tc['file'] + '.part')
|
||||
_try_rm(tc['file'] + '.info.json')
|
||||
def try_rm_tcs_files():
|
||||
for tc in test_cases:
|
||||
tc_filename = get_tc_filename(tc)
|
||||
try_rm(tc_filename)
|
||||
try_rm(tc_filename + '.part')
|
||||
try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
|
||||
try_rm_tcs_files()
|
||||
try:
|
||||
for retry in range(1, RETRIES + 1):
|
||||
try_num = 1
|
||||
while True:
|
||||
try:
|
||||
fd.download([test_case['url']])
|
||||
ydl.download([test_case['url']])
|
||||
except (DownloadError, ExtractorError) as err:
|
||||
if retry == RETRIES: raise
|
||||
|
||||
# Check if the exception is not a network related one
|
||||
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
|
||||
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
|
||||
raise
|
||||
|
||||
print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
|
||||
if try_num == RETRIES:
|
||||
report_warning(u'Failed due to network errors, skipping...')
|
||||
return
|
||||
|
||||
print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
|
||||
|
||||
try_num += 1
|
||||
else:
|
||||
break
|
||||
|
||||
for tc in test_cases:
|
||||
tc_filename = get_tc_filename(tc)
|
||||
if not test_case.get('params', {}).get('skip_download', False):
|
||||
self.assertTrue(os.path.exists(tc['file']), msg='Missing file ' + tc['file'])
|
||||
self.assertTrue(tc['file'] in finished_hook_called)
|
||||
self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
|
||||
self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
|
||||
self.assertTrue(tc_filename in finished_hook_called)
|
||||
info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
|
||||
self.assertTrue(os.path.exists(info_json_fn))
|
||||
if 'md5' in tc:
|
||||
md5_for_file = _file_md5(tc['file'])
|
||||
md5_for_file = _file_md5(tc_filename)
|
||||
self.assertEqual(md5_for_file, tc['md5'])
|
||||
with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
|
||||
with io.open(info_json_fn, encoding='utf-8') as infof:
|
||||
info_dict = json.load(infof)
|
||||
for (info_field, value) in tc.get('info_dict', {}).items():
|
||||
self.assertEqual(value, info_dict.get(info_field))
|
||||
for (info_field, expected) in tc.get('info_dict', {}).items():
|
||||
if isinstance(expected, compat_str) and expected.startswith('md5:'):
|
||||
got = 'md5:' + md5(info_dict.get(info_field))
|
||||
else:
|
||||
got = info_dict.get(info_field)
|
||||
self.assertEqual(expected, got,
|
||||
u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
|
||||
|
||||
# If checkable fields are missing from the test case, print the info_dict
|
||||
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
|
||||
for key, value in info_dict.items()
|
||||
if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
|
||||
if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
|
||||
sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n')
|
||||
|
||||
# Check for the presence of mandatory fields
|
||||
for key in ('id', 'url', 'title', 'ext'):
|
||||
self.assertTrue(key in info_dict.keys() and info_dict[key])
|
||||
# Check for mandatory fields that are automatically set by YoutubeDL
|
||||
for key in ['webpage_url', 'extractor', 'extractor_key']:
|
||||
self.assertTrue(info_dict.get(key), u'Missing field: %s' % key)
|
||||
finally:
|
||||
for tc in test_cases:
|
||||
_try_rm(tc['file'])
|
||||
_try_rm(tc['file'] + '.part')
|
||||
_try_rm(tc['file'] + '.info.json')
|
||||
try_rm_tcs_files()
|
||||
|
||||
return test_template
|
||||
|
||||
### And add them to TestDownload
|
||||
for test_case in defs:
|
||||
for n, test_case in enumerate(defs):
|
||||
test_method = generator(test_case)
|
||||
test_method.__name__ = "test_{0}".format(test_case["name"])
|
||||
tname = 'test_' + str(test_case['name'])
|
||||
i = 1
|
||||
while hasattr(TestDownload, tname):
|
||||
tname = 'test_' + str(test_case['name']) + '_' + str(i)
|
||||
i += 1
|
||||
test_method.__name__ = tname
|
||||
setattr(TestDownload, test_method.__name__, test_method)
|
||||
del test_method
|
||||
|
||||
|
||||
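The loop above turns every test-case dict into its own unittest method: generator() closes over one case, the name gets a numeric suffix when an extractor defines several tests, and setattr attaches the method to TestDownload. The same pattern in miniature (illustrative, with a made-up test class):

import unittest

class TestGenerated(unittest.TestCase):
    pass

def make_test(case):
    def test_template(self):
        self.assertTrue(case['expected'])
    return test_template

for case in [{'name': 'foo', 'expected': True},
             {'name': 'foo', 'expected': True}]:
    method = make_test(case)
    name = 'test_' + case['name']
    i = 1
    while hasattr(TestGenerated, name):
        name = 'test_' + case['name'] + '_' + str(i)
        i += 1
    method.__name__ = name
    setattr(TestGenerated, name, method)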
test/test_playlists.py (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import FakeYDL
|
||||
|
||||
|
||||
from youtube_dl.extractor import (
|
||||
DailymotionPlaylistIE,
|
||||
DailymotionUserIE,
|
||||
VimeoChannelIE,
|
||||
UstreamChannelIE,
|
||||
SoundcloudSetIE,
|
||||
SoundcloudUserIE,
|
||||
LivestreamIE,
|
||||
NHLVideocenterIE,
|
||||
BambuserChannelIE,
|
||||
BandcampAlbumIE
|
||||
)
|
||||
|
||||
|
||||
class TestPlaylists(unittest.TestCase):
|
||||
def assertIsPlaylist(self, info):
|
||||
"""Make sure the info has '_type' set to 'playlist'"""
|
||||
self.assertEqual(info['_type'], 'playlist')
|
||||
|
||||
def test_dailymotion_playlist(self):
|
||||
dl = FakeYDL()
|
||||
ie = DailymotionPlaylistIE(dl)
|
||||
result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'SPORT')
|
||||
self.assertTrue(len(result['entries']) > 20)
|
||||
|
||||
def test_dailymotion_user(self):
|
||||
dl = FakeYDL()
|
||||
ie = DailymotionUserIE(dl)
|
||||
result = ie.extract('http://www.dailymotion.com/user/generation-quoi/')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'Génération Quoi')
|
||||
self.assertTrue(len(result['entries']) >= 26)
|
||||
|
||||
def test_vimeo_channel(self):
|
||||
dl = FakeYDL()
|
||||
ie = VimeoChannelIE(dl)
|
||||
result = ie.extract('http://vimeo.com/channels/tributes')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'Vimeo Tributes')
|
||||
self.assertTrue(len(result['entries']) > 24)
|
||||
|
||||
def test_ustream_channel(self):
|
||||
dl = FakeYDL()
|
||||
ie = UstreamChannelIE(dl)
|
||||
result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['id'], u'5124905')
|
||||
self.assertTrue(len(result['entries']) >= 11)
|
||||
|
||||
def test_soundcloud_set(self):
|
||||
dl = FakeYDL()
|
||||
ie = SoundcloudSetIE(dl)
|
||||
result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'The Royal Concept EP')
|
||||
self.assertTrue(len(result['entries']) >= 6)
|
||||
|
||||
def test_soundcloud_user(self):
|
||||
dl = FakeYDL()
|
||||
ie = SoundcloudUserIE(dl)
|
||||
result = ie.extract('https://soundcloud.com/the-concept-band')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['id'], u'9615865')
|
||||
self.assertTrue(len(result['entries']) >= 12)
|
||||
|
||||
def test_livestream_event(self):
|
||||
dl = FakeYDL()
|
||||
ie = LivestreamIE(dl)
|
||||
result = ie.extract('http://new.livestream.com/tedx/cityenglish')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'TEDCity2.0 (English)')
|
||||
self.assertTrue(len(result['entries']) >= 4)
|
||||
|
||||
def test_nhl_videocenter(self):
|
||||
dl = FakeYDL()
|
||||
ie = NHLVideocenterIE(dl)
|
||||
result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['id'], u'999')
|
||||
self.assertEqual(result['title'], u'Highlights')
|
||||
self.assertEqual(len(result['entries']), 12)
|
||||
|
||||
def test_bambuser_channel(self):
|
||||
dl = FakeYDL()
|
||||
ie = BambuserChannelIE(dl)
|
||||
result = ie.extract('http://bambuser.com/channel/pixelversity')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'pixelversity')
|
||||
self.assertTrue(len(result['entries']) >= 60)
|
||||
|
||||
def test_bandcamp_album(self):
|
||||
dl = FakeYDL()
|
||||
ie = BandcampAlbumIE(dl)
|
||||
result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], u'Nightmare Night EP')
|
||||
self.assertTrue(len(result['entries']) >= 4)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
test/test_subtitles.py (new file, 210 lines)
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import FakeYDL, md5
|
||||
|
||||
|
||||
from youtube_dl.extractor import (
|
||||
YoutubeIE,
|
||||
DailymotionIE,
|
||||
TEDIE,
|
||||
)
|
||||
|
||||
|
||||
class BaseTestSubtitles(unittest.TestCase):
|
||||
url = None
|
||||
IE = None
|
||||
def setUp(self):
|
||||
self.DL = FakeYDL()
|
||||
self.ie = self.IE(self.DL)
|
||||
|
||||
def getInfoDict(self):
|
||||
info_dict = self.ie.extract(self.url)
|
||||
return info_dict
|
||||
|
||||
def getSubtitles(self):
|
||||
info_dict = self.getInfoDict()
|
||||
return info_dict['subtitles']
|
||||
|
||||
|
||||
class TestYoutubeSubtitles(BaseTestSubtitles):
|
||||
url = 'QRS8MkLhQmM'
|
||||
IE = YoutubeIE
|
||||
|
||||
def getSubtitles(self):
|
||||
info_dict = self.getInfoDict()
|
||||
return info_dict[0]['subtitles']
|
||||
|
||||
def test_youtube_no_writesubtitles(self):
|
||||
self.DL.params['writesubtitles'] = False
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(subtitles, None)
|
||||
|
||||
def test_youtube_subtitles(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
|
||||
|
||||
def test_youtube_subtitles_lang(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['subtitleslangs'] = ['it']
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
|
||||
|
||||
def test_youtube_allsubtitles(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['allsubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(len(subtitles.keys()), 13)
|
||||
|
||||
def test_youtube_subtitles_sbv_format(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['subtitlesformat'] = 'sbv'
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')
|
||||
|
||||
def test_youtube_subtitles_vtt_format(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['subtitlesformat'] = 'vtt'
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['en']), '356cdc577fde0c6783b9b822e7206ff7')
|
||||
|
||||
def test_youtube_list_subtitles(self):
|
||||
self.DL.expect_warning(u'Video doesn\'t have automatic captions')
|
||||
self.DL.params['listsubtitles'] = True
|
||||
info_dict = self.getInfoDict()
|
||||
self.assertEqual(info_dict, None)
|
||||
|
||||
def test_youtube_automatic_captions(self):
|
||||
self.url = '8YoUxe5ncPo'
|
||||
self.DL.params['writeautomaticsub'] = True
|
||||
self.DL.params['subtitleslangs'] = ['it']
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertTrue(subtitles['it'] is not None)
|
||||
|
||||
def test_youtube_nosubtitles(self):
|
||||
self.DL.expect_warning(u'video doesn\'t have subtitles')
|
||||
self.url = 'sAjKT8FhjI8'
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['allsubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(len(subtitles), 0)
|
||||
|
||||
def test_youtube_multiple_langs(self):
|
||||
self.url = 'QRS8MkLhQmM'
|
||||
self.DL.params['writesubtitles'] = True
|
||||
langs = ['it', 'fr', 'de']
|
||||
self.DL.params['subtitleslangs'] = langs
|
||||
subtitles = self.getSubtitles()
|
||||
for lang in langs:
|
||||
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
|
||||
|
||||
|
||||
class TestDailymotionSubtitles(BaseTestSubtitles):
|
||||
url = 'http://www.dailymotion.com/video/xczg00'
|
||||
IE = DailymotionIE
|
||||
|
||||
def test_no_writesubtitles(self):
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(subtitles, None)
|
||||
|
||||
def test_subtitles(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
|
||||
|
||||
def test_subtitles_lang(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['subtitleslangs'] = ['fr']
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
|
||||
|
||||
def test_allsubtitles(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['allsubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(len(subtitles.keys()), 5)
|
||||
|
||||
def test_list_subtitles(self):
|
||||
self.DL.expect_warning(u'Automatic Captions not supported by this server')
|
||||
self.DL.params['listsubtitles'] = True
|
||||
info_dict = self.getInfoDict()
|
||||
self.assertEqual(info_dict, None)
|
||||
|
||||
def test_automatic_captions(self):
|
||||
self.DL.expect_warning(u'Automatic Captions not supported by this server')
|
||||
self.DL.params['writeautomaticsub'] = True
|
||||
self.DL.params['subtitleslang'] = ['en']
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertTrue(len(subtitles.keys()) == 0)
|
||||
|
||||
def test_nosubtitles(self):
|
||||
self.DL.expect_warning(u'video doesn\'t have subtitles')
|
||||
self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['allsubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(len(subtitles), 0)
|
||||
|
||||
def test_multiple_langs(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
langs = ['es', 'fr', 'de']
|
||||
self.DL.params['subtitleslangs'] = langs
|
||||
subtitles = self.getSubtitles()
|
||||
for lang in langs:
|
||||
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
|
||||
|
||||
|
||||
class TestTedSubtitles(BaseTestSubtitles):
|
||||
url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
|
||||
IE = TEDIE
|
||||
|
||||
def test_no_writesubtitles(self):
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(subtitles, None)
|
||||
|
||||
def test_subtitles(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d')
|
||||
|
||||
def test_subtitles_lang(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['subtitleslangs'] = ['fr']
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6')
|
||||
|
||||
def test_allsubtitles(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
self.DL.params['allsubtitles'] = True
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertEqual(len(subtitles.keys()), 28)
|
||||
|
||||
def test_list_subtitles(self):
|
||||
self.DL.expect_warning(u'Automatic Captions not supported by this server')
|
||||
self.DL.params['listsubtitles'] = True
|
||||
info_dict = self.getInfoDict()
|
||||
self.assertEqual(info_dict, None)
|
||||
|
||||
def test_automatic_captions(self):
|
||||
self.DL.expect_warning(u'Automatic Captions not supported by this server')
|
||||
self.DL.params['writeautomaticsub'] = True
|
||||
self.DL.params['subtitleslang'] = ['en']
|
||||
subtitles = self.getSubtitles()
|
||||
self.assertTrue(len(subtitles.keys()) == 0)
|
||||
|
||||
def test_multiple_langs(self):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
langs = ['es', 'fr', 'de']
|
||||
self.DL.params['subtitleslangs'] = langs
|
||||
subtitles = self.getSubtitles()
|
||||
for lang in langs:
|
||||
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -1,21 +1,32 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Various small unit tests
|
||||
|
||||
import sys
|
||||
import unittest
|
||||
# coding: utf-8
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
# Various small unit tests
|
||||
import xml.etree.ElementTree
|
||||
|
||||
#from youtube_dl.utils import htmlentity_transform
|
||||
from youtube_dl.utils import timeconvert
|
||||
from youtube_dl.utils import sanitize_filename
|
||||
from youtube_dl.utils import unescapeHTML
|
||||
from youtube_dl.utils import orderedSet
|
||||
from youtube_dl.utils import DateRange
|
||||
from youtube_dl.utils import unified_strdate
|
||||
from youtube_dl.utils import (
|
||||
timeconvert,
|
||||
sanitize_filename,
|
||||
unescapeHTML,
|
||||
orderedSet,
|
||||
DateRange,
|
||||
unified_strdate,
|
||||
find_xpath_attr,
|
||||
get_meta_content,
|
||||
xpath_with_ns,
|
||||
smuggle_url,
|
||||
unsmuggle_url,
|
||||
shell_quote,
|
||||
encodeFilename,
|
||||
)
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
_compat_str = lambda b: b.decode('unicode-escape')
|
||||
@@ -112,5 +123,59 @@ class TestUtil(unittest.TestCase):
|
||||
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
|
||||
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
|
||||
|
||||
def test_find_xpath_attr(self):
|
||||
testxml = u'''<root>
|
||||
<node/>
|
||||
<node x="a"/>
|
||||
<node x="a" y="c" />
|
||||
<node x="b" y="d" />
|
||||
</root>'''
|
||||
doc = xml.etree.ElementTree.fromstring(testxml)
|
||||
|
||||
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
|
||||
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
|
||||
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
|
||||
|
||||
def test_meta_parser(self):
|
||||
testhtml = u'''
|
||||
<head>
|
||||
<meta name="description" content="foo & bar">
|
||||
<meta content='Plato' name='author'/>
|
||||
</head>
|
||||
'''
|
||||
get_meta = lambda name: get_meta_content(name, testhtml)
|
||||
self.assertEqual(get_meta('description'), u'foo & bar')
|
||||
self.assertEqual(get_meta('author'), 'Plato')
|
||||
|
||||
def test_xpath_with_ns(self):
|
||||
testxml = u'''<root xmlns:media="http://example.com/">
|
||||
<media:song>
|
||||
<media:author>The Author</media:author>
|
||||
<url>http://server.com/download.mp3</url>
|
||||
</media:song>
|
||||
</root>'''
|
||||
doc = xml.etree.ElementTree.fromstring(testxml)
|
||||
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
|
||||
self.assertTrue(find('media:song') is not None)
|
||||
self.assertEqual(find('media:song/media:author').text, u'The Author')
|
||||
self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
|
||||
|
||||
def test_smuggle_url(self):
|
||||
data = {u"ö": u"ö", u"abc": [3]}
|
||||
url = 'https://foo.bar/baz?x=y#a'
|
||||
smug_url = smuggle_url(url, data)
|
||||
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
|
||||
self.assertEqual(url, unsmug_url)
|
||||
self.assertEqual(data, unsmug_data)
|
||||
|
||||
res_url, res_data = unsmuggle_url(url)
|
||||
self.assertEqual(res_url, url)
|
||||
self.assertEqual(res_data, None)
|
||||
|
||||
def test_shell_quote(self):
|
||||
args = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')]
|
||||
self.assertEqual(shell_quote(args), u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
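
The test_smuggle_url case above pins down only the round-trip contract: smuggle_url(url, data) returns a URL from which unsmuggle_url recovers both the original URL and the extra dict. Below is a minimal stand-alone sketch of that contract, assuming a JSON payload carried in a URL fragment; the parameter name __demo_data and the encoding are invented for illustration and are not youtube-dl's actual scheme.

import json
try:
    from urllib.parse import quote, unquote  # Python 3
except ImportError:
    from urllib import quote, unquote  # Python 2

def demo_smuggle(url, data):
    # Carry the extra dict as URL-encoded JSON in the fragment.
    return url + '#__demo_data=' + quote(json.dumps(data))

def demo_unsmuggle(url, default=None):
    if '#__demo_data=' not in url:
        return url, default
    url, _, payload = url.partition('#__demo_data=')
    return url, json.loads(unquote(payload))

smug = demo_smuggle('https://foo.bar/baz?x=y', {'abc': [3]})
assert demo_unsmuggle(smug) == ('https://foo.bar/baz?x=y', {'abc': [3]})
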
|
||||
|
||||
test/test_write_annotations.py (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import get_params, try_rm
|
||||
|
||||
|
||||
import io
|
||||
|
||||
import xml.etree.ElementTree
|
||||
|
||||
import youtube_dl.YoutubeDL
|
||||
import youtube_dl.extractor
|
||||
|
||||
|
||||
class YoutubeDL(youtube_dl.YoutubeDL):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(YoutubeDL, self).__init__(*args, **kwargs)
|
||||
self.to_stderr = self.to_screen
|
||||
|
||||
params = get_params({
|
||||
'writeannotations': True,
|
||||
'skip_download': True,
|
||||
'writeinfojson': False,
|
||||
'format': 'flv',
|
||||
})
|
||||
|
||||
|
||||
|
||||
TEST_ID = 'gr51aVj-mLg'
|
||||
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
|
||||
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
|
||||
|
||||
class TestAnnotations(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# Clear old files
|
||||
self.tearDown()
|
||||
|
||||
|
||||
def test_info_json(self):
|
||||
expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
|
||||
ie = youtube_dl.extractor.YoutubeIE()
|
||||
ydl = YoutubeDL(params)
|
||||
ydl.add_info_extractor(ie)
|
||||
ydl.download([TEST_ID])
|
||||
self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
|
||||
annoxml = None
|
||||
with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
|
||||
annoxml = xml.etree.ElementTree.parse(annof)
|
||||
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
|
||||
root = annoxml.getroot()
|
||||
self.assertEqual(root.tag, 'document')
|
||||
annotationsTag = root.find('annotations')
|
||||
self.assertEqual(annotationsTag.tag, 'annotations')
|
||||
annotations = annotationsTag.findall('annotation')
|
||||
|
||||
#Not all the annotations have TEXT children and the annotations are returned unsorted.
|
||||
for a in annotations:
|
||||
self.assertEqual(a.tag, 'annotation')
|
||||
if a.get('type') == 'text':
|
||||
textTag = a.find('TEXT')
|
||||
text = textTag.text
|
||||
self.assertTrue(text in expected) #assertIn only added in python 2.7
|
||||
#remove the first occurance, there could be more than one annotation with the same text
|
||||
expected.remove(text)
|
||||
#We should have seen (and removed) all the expected annotation texts.
|
||||
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
|
||||
|
||||
|
||||
def tearDown(self):
|
||||
try_rm(ANNOTATIONS_FILE)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
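
For reference, the XML walk that test_info_json performs on the downloaded annotations file can be reproduced against an inline sample. The sample below is made up, but the document/annotations/annotation/TEXT layout and the type == 'text' filter mirror what the test asserts.

import xml.etree.ElementTree

sample = '''<document>
  <annotations>
    <annotation type="text"><TEXT>Note</TEXT></annotation>
    <annotation type="highlight"/>
  </annotations>
</document>'''

root = xml.etree.ElementTree.fromstring(sample)
texts = [a.find('TEXT').text
         for a in root.find('annotations').findall('annotation')
         if a.get('type') == 'text']
print(texts)  # ['Note']
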
|
||||
@@ -1,40 +1,36 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
import json
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
# Allow direct execution
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from test.helper import get_params
|
||||
|
||||
import youtube_dl.FileDownloader
|
||||
import youtube_dl.InfoExtractors
|
||||
from youtube_dl.utils import *
|
||||
|
||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
|
||||
import io
|
||||
import json
|
||||
|
||||
# General configuration (from __init__, not very elegant...)
|
||||
jar = compat_cookiejar.CookieJar()
|
||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
|
||||
proxy_handler = compat_urllib_request.ProxyHandler()
|
||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
|
||||
compat_urllib_request.install_opener(opener)
|
||||
import youtube_dl.YoutubeDL
|
||||
import youtube_dl.extractor
|
||||
|
||||
class FileDownloader(youtube_dl.FileDownloader):
|
||||
|
||||
class YoutubeDL(youtube_dl.YoutubeDL):
|
||||
def __init__(self, *args, **kwargs):
|
||||
youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
|
||||
super(YoutubeDL, self).__init__(*args, **kwargs)
|
||||
self.to_stderr = self.to_screen
|
||||
|
||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
params = json.load(pf)
|
||||
params['writeinfojson'] = True
|
||||
params['skip_download'] = True
|
||||
params['writedescription'] = True
|
||||
params = get_params({
|
||||
'writeinfojson': True,
|
||||
'skip_download': True,
|
||||
'writedescription': True,
|
||||
})
|
||||
|
||||
|
||||
TEST_ID = 'BaW_jenozKc'
|
||||
INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
|
||||
INFO_JSON_FILE = TEST_ID + '.info.json'
|
||||
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
|
||||
EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐
|
||||
|
||||
@@ -42,16 +38,17 @@ This is a test video for youtube-dl.
|
||||
|
||||
For more information, contact phihag@phihag.de .'''
|
||||
|
||||
|
||||
class TestInfoJSON(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# Clear old files
|
||||
self.tearDown()
|
||||
|
||||
def test_info_json(self):
|
||||
ie = youtube_dl.InfoExtractors.YoutubeIE()
|
||||
fd = FileDownloader(params)
|
||||
fd.add_info_extractor(ie)
|
||||
fd.download([TEST_ID])
|
||||
ie = youtube_dl.extractor.YoutubeIE()
|
||||
ydl = YoutubeDL(params)
|
||||
ydl.add_info_extractor(ie)
|
||||
ydl.download([TEST_ID])
|
||||
self.assertTrue(os.path.exists(INFO_JSON_FILE))
|
||||
with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
|
||||
jd = json.load(jsonf)
|
||||
|
||||
@@ -1,109 +1,111 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sys
|
||||
import unittest
|
||||
import json
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE
|
||||
from youtube_dl.utils import *
|
||||
from youtube_dl.FileDownloader import FileDownloader
|
||||
from test.helper import FakeYDL
|
||||
|
||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
|
||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
parameters = json.load(pf)
|
||||
|
||||
# General configuration (from __init__, not very elegant...)
|
||||
jar = compat_cookiejar.CookieJar()
|
||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
|
||||
proxy_handler = compat_urllib_request.ProxyHandler()
|
||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
|
||||
compat_urllib_request.install_opener(opener)
|
||||
from youtube_dl.extractor import (
|
||||
YoutubeUserIE,
|
||||
YoutubePlaylistIE,
|
||||
YoutubeIE,
|
||||
YoutubeChannelIE,
|
||||
YoutubeShowIE,
|
||||
)
|
||||
|
||||
class FakeDownloader(FileDownloader):
|
||||
def __init__(self):
|
||||
self.result = []
|
||||
self.params = parameters
|
||||
def to_screen(self, s):
|
||||
print(s)
|
||||
def trouble(self, s, tb=None):
|
||||
raise Exception(s)
|
||||
def extract_info(self, url):
|
||||
self.result.append(url)
|
||||
return url
|
||||
|
||||
class TestYoutubeLists(unittest.TestCase):
|
||||
def assertIsPlaylist(self,info):
|
||||
def assertIsPlaylist(self, info):
|
||||
"""Make sure the info has '_type' set to 'playlist'"""
|
||||
self.assertEqual(info['_type'], 'playlist')
|
||||
|
||||
def test_youtube_playlist(self):
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')[0]
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(result['title'], 'ytdl test PL')
|
||||
ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
|
||||
self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
|
||||
|
||||
def test_issue_673(self):
|
||||
dl = FakeDownloader()
|
||||
def test_youtube_playlist_noplaylist(self):
|
||||
dl = FakeYDL()
|
||||
dl.params['noplaylist'] = True
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('PLBB231211A4F62143')[0]
|
||||
self.assertEqual(result['title'], 'Team Fortress 2')
|
||||
self.assertTrue(len(result['entries']) > 40)
|
||||
result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
|
||||
self.assertEqual(result['_type'], 'url')
|
||||
self.assertEqual(YoutubeIE()._extract_id(result['url']), 'FXxLjLQi3Fg')
|
||||
|
||||
def test_issue_673(self):
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('PLBB231211A4F62143')
|
||||
self.assertTrue(len(result['entries']) > 25)
|
||||
|
||||
def test_youtube_playlist_long(self):
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')[0]
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertTrue(len(result['entries']) >= 799)
|
||||
|
||||
def test_youtube_playlist_with_deleted(self):
|
||||
#651
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')[0]
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
|
||||
ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
|
||||
self.assertFalse('pElCt5oNDuI' in ytie_results)
|
||||
self.assertFalse('KdPEApIVdWM' in ytie_results)
|
||||
|
||||
def test_youtube_playlist_empty(self):
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')[0]
|
||||
result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')
|
||||
self.assertIsPlaylist(result)
|
||||
self.assertEqual(len(result['entries']), 0)
|
||||
|
||||
def test_youtube_course(self):
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
# TODO find a > 100 (paginating?) videos course
|
||||
result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')[0]
|
||||
result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
|
||||
entries = result['entries']
|
||||
self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
|
||||
self.assertEqual(len(entries), 25)
|
||||
self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')
|
||||
|
||||
def test_youtube_channel(self):
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubeChannelIE(dl)
|
||||
#test paginated channel
|
||||
result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')[0]
|
||||
result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
|
||||
self.assertTrue(len(result['entries']) > 90)
|
||||
#test autogenerated channel
|
||||
result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')[0]
|
||||
result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
|
||||
self.assertTrue(len(result['entries']) >= 18)
|
||||
|
||||
def test_youtube_user(self):
|
||||
dl = FakeDownloader()
|
||||
dl = FakeYDL()
|
||||
ie = YoutubeUserIE(dl)
|
||||
result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')[0]
|
||||
result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
|
||||
self.assertTrue(len(result['entries']) >= 320)
|
||||
|
||||
def test_youtube_safe_search(self):
|
||||
dl = FakeYDL()
|
||||
ie = YoutubePlaylistIE(dl)
|
||||
result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')
|
||||
self.assertEqual(len(result['entries']), 2)
|
||||
|
||||
def test_youtube_show(self):
|
||||
dl = FakeYDL()
|
||||
ie = YoutubeShowIE(dl)
|
||||
result = ie.extract('http://www.youtube.com/show/airdisasters')
|
||||
self.assertTrue(len(result) >= 3)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
test/test_youtube_signature.py (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
import io
|
||||
import re
|
||||
import string
|
||||
|
||||
from youtube_dl.extractor import YoutubeIE
|
||||
from youtube_dl.utils import compat_str, compat_urlretrieve
|
||||
|
||||
_TESTS = [
|
||||
(
|
||||
u'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
|
||||
u'js',
|
||||
86,
|
||||
u'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
|
||||
),
|
||||
(
|
||||
u'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
|
||||
u'js',
|
||||
85,
|
||||
u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
|
||||
),
|
||||
(
|
||||
u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf',
|
||||
u'swf',
|
||||
82,
|
||||
u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321'
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
class TestSignature(unittest.TestCase):
|
||||
def setUp(self):
|
||||
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
|
||||
if not os.path.exists(self.TESTDATA_DIR):
|
||||
os.mkdir(self.TESTDATA_DIR)
|
||||
|
||||
|
||||
def make_tfunc(url, stype, sig_length, expected_sig):
|
||||
basename = url.rpartition('/')[2]
|
||||
m = re.match(r'.*-([a-zA-Z0-9_-]+)\.[a-z]+$', basename)
|
||||
assert m, '%r should follow URL format' % basename
|
||||
test_id = m.group(1)
|
||||
|
||||
def test_func(self):
|
||||
fn = os.path.join(self.TESTDATA_DIR, basename)
|
||||
|
||||
if not os.path.exists(fn):
|
||||
compat_urlretrieve(url, fn)
|
||||
|
||||
ie = YoutubeIE()
|
||||
if stype == 'js':
|
||||
with io.open(fn, encoding='utf-8') as testf:
|
||||
jscode = testf.read()
|
||||
func = ie._parse_sig_js(jscode)
|
||||
else:
|
||||
assert stype == 'swf'
|
||||
with open(fn, 'rb') as testf:
|
||||
swfcode = testf.read()
|
||||
func = ie._parse_sig_swf(swfcode)
|
||||
src_sig = compat_str(string.printable[:sig_length])
|
||||
got_sig = func(src_sig)
|
||||
self.assertEqual(got_sig, expected_sig)
|
||||
|
||||
test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
|
||||
setattr(TestSignature, test_func.__name__, test_func)
|
||||
|
||||
for test_spec in _TESTS:
|
||||
make_tfunc(*test_spec)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
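
make_tfunc above is an instance of a common unittest pattern: build one test method per data tuple at import time and attach it to the TestCase with setattr, so every signature case reports as a separate test. A stripped-down sketch of the same pattern follows, with invented names (CASES, make_case, TestGenerated) that have nothing to do with the actual signature data.

import unittest

CASES = [('double', 2, 4), ('square', 3, 9)]

class TestGenerated(unittest.TestCase):
    pass

def make_case(name, x, expected):
    def test_func(self):
        result = x * x if name == 'square' else x * 2
        self.assertEqual(result, expected)
    test_func.__name__ = str('test_' + name)
    setattr(TestGenerated, test_func.__name__, test_func)

for case in CASES:
    make_case(*case)

if __name__ == '__main__':
    unittest.main()
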
|
||||
@@ -1,100 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sys
|
||||
import unittest
|
||||
import json
|
||||
import io
|
||||
import hashlib
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from youtube_dl.InfoExtractors import YoutubeIE
|
||||
from youtube_dl.utils import *
|
||||
|
||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
|
||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
parameters = json.load(pf)
|
||||
|
||||
# General configuration (from __init__, not very elegant...)
|
||||
jar = compat_cookiejar.CookieJar()
|
||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
|
||||
proxy_handler = compat_urllib_request.ProxyHandler()
|
||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
|
||||
compat_urllib_request.install_opener(opener)
|
||||
|
||||
class FakeDownloader(object):
|
||||
def __init__(self):
|
||||
self.result = []
|
||||
self.params = parameters
|
||||
def to_screen(self, s):
|
||||
print(s)
|
||||
def trouble(self, s, tb=None):
|
||||
raise Exception(s)
|
||||
def download(self, x):
|
||||
self.result.append(x)
|
||||
|
||||
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
|
||||
|
||||
class TestYoutubeSubtitles(unittest.TestCase):
|
||||
def setUp(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['allsubtitles'] = False
|
||||
DL.params['writesubtitles'] = False
|
||||
DL.params['subtitlesformat'] = 'srt'
|
||||
DL.params['listsubtitles'] = False
|
||||
def test_youtube_no_subtitles(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['writesubtitles'] = False
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
subtitles = info_dict[0]['subtitles']
|
||||
self.assertEqual(subtitles, None)
|
||||
def test_youtube_subtitles(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['writesubtitles'] = True
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
sub = info_dict[0]['subtitles'][0]
|
||||
self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
|
||||
def test_youtube_subtitles_it(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['writesubtitles'] = True
|
||||
DL.params['subtitleslang'] = 'it'
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
sub = info_dict[0]['subtitles'][0]
|
||||
self.assertEqual(md5(sub[2]), '164a51f16f260476a05b50fe4c2f161d')
|
||||
def test_youtube_onlysubtitles(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['writesubtitles'] = True
|
||||
DL.params['onlysubtitles'] = True
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
sub = info_dict[0]['subtitles'][0]
|
||||
self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
|
||||
def test_youtube_allsubtitles(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['allsubtitles'] = True
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
subtitles = info_dict[0]['subtitles']
|
||||
self.assertEqual(len(subtitles), 13)
|
||||
def test_youtube_subtitles_format(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['writesubtitles'] = True
|
||||
DL.params['subtitlesformat'] = 'sbv'
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
sub = info_dict[0]['subtitles'][0]
|
||||
self.assertEqual(md5(sub[2]), '13aeaa0c245a8bed9a451cb643e3ad8b')
|
||||
def test_youtube_list_subtitles(self):
|
||||
DL = FakeDownloader()
|
||||
DL.params['listsubtitles'] = True
|
||||
IE = YoutubeIE(DL)
|
||||
info_dict = IE.extract('QRS8MkLhQmM')
|
||||
self.assertEqual(info_dict, None)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
test/tests.json (deleted, 416 lines)
@@ -1,416 +0,0 @@
|
||||
[
|
||||
{
|
||||
"name": "Youtube",
|
||||
"url": "http://www.youtube.com/watch?v=BaW_jenozKc",
|
||||
"file": "BaW_jenozKc.mp4",
|
||||
"info_dict": {
|
||||
"title": "youtube-dl test video \"'/\\ä↭𝕐",
|
||||
"uploader": "Philipp Hagemeister",
|
||||
"uploader_id": "phihag",
|
||||
"upload_date": "20121002",
|
||||
"description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Dailymotion",
|
||||
"md5": "392c4b85a60a90dc4792da41ce3144eb",
|
||||
"url": "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech",
|
||||
"file": "x33vw9.mp4"
|
||||
},
|
||||
{
|
||||
"name": "Metacafe",
|
||||
"add_ie": ["Youtube"],
|
||||
"url": "http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
|
||||
"file": "_aUehQsCQtM.flv"
|
||||
},
|
||||
{
|
||||
"name": "BlipTV",
|
||||
"md5": "b2d849efcf7ee18917e4b4d9ff37cafe",
|
||||
"url": "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352",
|
||||
"file": "5779306.m4v"
|
||||
},
|
||||
{
|
||||
"name": "XVideos",
|
||||
"md5": "1d0c835822f0a71a7bf011855db929d0",
|
||||
"url": "http://www.xvideos.com/video939581/funny_porns_by_s_-1",
|
||||
"file": "939581.flv"
|
||||
},
|
||||
{
|
||||
"name": "YouPorn",
|
||||
"md5": "c37ddbaaa39058c76a7e86c6813423c1",
|
||||
"url": "http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/",
|
||||
"file": "505835.mp4"
|
||||
},
|
||||
{
|
||||
"name": "Pornotube",
|
||||
"md5": "374dd6dcedd24234453b295209aa69b6",
|
||||
"url": "http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing",
|
||||
"file": "1689755.flv"
|
||||
},
|
||||
{
|
||||
"name": "YouJizz",
|
||||
"md5": "07e15fa469ba384c7693fd246905547c",
|
||||
"url": "http://www.youjizz.com/videos/zeichentrick-1-2189178.html",
|
||||
"file": "2189178.flv"
|
||||
},
|
||||
{
|
||||
"name": "Vimeo",
|
||||
"md5": "8879b6cc097e987f02484baf890129e5",
|
||||
"url": "http://vimeo.com/56015672",
|
||||
"file": "56015672.mp4",
|
||||
"info_dict": {
|
||||
"title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
|
||||
"uploader": "Filippo Valsorda",
|
||||
"uploader_id": "user7108434",
|
||||
"upload_date": "20121220",
|
||||
"description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Soundcloud",
|
||||
"md5": "ebef0a451b909710ed1d7787dddbf0d7",
|
||||
"url": "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy",
|
||||
"file": "62986583.mp3"
|
||||
},
|
||||
{
|
||||
"name": "StanfordOpenClassroom",
|
||||
"md5": "544a9468546059d4e80d76265b0443b8",
|
||||
"url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
|
||||
"file": "PracticalUnix_intro-environment.mp4"
|
||||
},
|
||||
{
|
||||
"name": "XNXX",
|
||||
"md5": "0831677e2b4761795f68d417e0b7b445",
|
||||
"url": "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_",
|
||||
"file": "1135332.flv"
|
||||
},
|
||||
{
|
||||
"name": "Youku",
|
||||
"url": "http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
|
||||
"file": "XNDgyMDQ2NTQw_part00.flv",
|
||||
"md5": "ffe3f2e435663dc2d1eea34faeff5b5b",
|
||||
"params": { "test": false }
|
||||
},
|
||||
{
|
||||
"name": "NBA",
|
||||
"url": "http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html",
|
||||
"file": "0021200253-okc-bkn-recap.nba.mp4",
|
||||
"md5": "c0edcfc37607344e2ff8f13c378c88a4"
|
||||
},
|
||||
{
|
||||
"name": "JustinTV",
|
||||
"url": "http://www.twitch.tv/thegamedevhub/b/296128360",
|
||||
"file": "296128360.flv",
|
||||
"md5": "ecaa8a790c22a40770901460af191c9a"
|
||||
},
|
||||
{
|
||||
"name": "MyVideo",
|
||||
"url": "http://www.myvideo.de/watch/8229274/bowling_fail_or_win",
|
||||
"file": "8229274.flv",
|
||||
"md5": "2d2753e8130479ba2cb7e0a37002053e"
|
||||
},
|
||||
{
|
||||
"name": "Escapist",
|
||||
"url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
|
||||
"file": "6618-Breaking-Down-Baldurs-Gate.mp4",
|
||||
"md5": "c6793dbda81388f4264c1ba18684a74d",
|
||||
"skip": "Fails with timeout on Travis"
|
||||
},
|
||||
{
|
||||
"name": "GooglePlus",
|
||||
"url": "https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
|
||||
"file": "ZButuJc6CtH.flv"
|
||||
},
|
||||
{
|
||||
"name": "FunnyOrDie",
|
||||
"url": "http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version",
|
||||
"file": "0732f586d7.mp4",
|
||||
"md5": "f647e9e90064b53b6e046e75d0241fbd"
|
||||
},
|
||||
{
|
||||
"name": "Steam",
|
||||
"url": "http://store.steampowered.com/video/105600/",
|
||||
"playlist": [
|
||||
{
|
||||
"file": "81300.flv",
|
||||
"md5": "f870007cee7065d7c76b88f0a45ecc07",
|
||||
"info_dict": {
|
||||
"title": "Terraria 1.1 Trailer"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "80859.flv",
|
||||
"md5": "61aaf31a5c5c3041afb58fb83cbb5751",
|
||||
"info_dict": {
|
||||
"title": "Terraria Trailer"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Ustream",
|
||||
"url": "http://www.ustream.tv/recorded/20274954",
|
||||
"file": "20274954.flv",
|
||||
"md5": "088f151799e8f572f84eb62f17d73e5c",
|
||||
"info_dict": {
|
||||
"title": "Young Americans for Liberty February 7, 2012 2:28 AM"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "InfoQ",
|
||||
"url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
|
||||
"file": "12-jan-pythonthings.mp4",
|
||||
"info_dict": {
|
||||
"title": "A Few of My Favorite [Python] Things"
|
||||
},
|
||||
"params": {
|
||||
"skip_download": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ComedyCentral",
|
||||
"url": "http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart",
|
||||
"file": "422212.mp4",
|
||||
"md5": "4e2f5cb088a83cd8cdb7756132f9739d",
|
||||
"info_dict": {
|
||||
"title": "thedailyshow-kristen-stewart part 1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "RBMARadio",
|
||||
"url": "http://www.rbmaradio.com/shows/ford-lopatin-live-at-primavera-sound-2011",
|
||||
"file": "ford-lopatin-live-at-primavera-sound-2011.mp3",
|
||||
"md5": "6bc6f9bcb18994b4c983bc3bf4384d95",
|
||||
"info_dict": {
|
||||
"title": "Live at Primavera Sound 2011",
|
||||
"description": "Joel Ford and Daniel \u2019Oneohtrix Point Never\u2019 Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
|
||||
"uploader": "Ford & Lopatin",
|
||||
"uploader_id": "ford-lopatin",
|
||||
"location": "Spain"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Facebook",
|
||||
"url": "https://www.facebook.com/photo.php?v=120708114770723",
|
||||
"file": "120708114770723.mp4",
|
||||
"md5": "48975a41ccc4b7a581abd68651c1a5a8",
|
||||
"info_dict": {
|
||||
"title": "PEOPLE ARE AWESOME 2013",
|
||||
"duration": 279
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "EightTracks",
|
||||
"url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
|
||||
"playlist": [
|
||||
{
|
||||
"file": "11885610.m4a",
|
||||
"md5": "96ce57f24389fc8734ce47f4c1abcc55",
|
||||
"info_dict": {
|
||||
"title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885608.m4a",
|
||||
"md5": "4ab26f05c1f7291ea460a3920be8021f",
|
||||
"info_dict": {
|
||||
"title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885679.m4a",
|
||||
"md5": "d30b5b5f74217410f4689605c35d1fd7",
|
||||
"info_dict": {
|
||||
"title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885680.m4a",
|
||||
"md5": "4eb0a669317cd725f6bbd336a29f923a",
|
||||
"info_dict": {
|
||||
"title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885682.m4a",
|
||||
"md5": "1893e872e263a2705558d1d319ad19e8",
|
||||
"info_dict": {
|
||||
"title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885683.m4a",
|
||||
"md5": "b673c46f47a216ab1741ae8836af5899",
|
||||
"info_dict": {
|
||||
"title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885684.m4a",
|
||||
"md5": "1d74534e95df54986da7f5abf7d842b7",
|
||||
"info_dict": {
|
||||
"title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file": "11885685.m4a",
|
||||
"md5": "f081f47af8f6ae782ed131d38b9cd1c0",
|
||||
"info_dict": {
|
||||
"title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Keek",
|
||||
"url": "http://www.keek.com/ytdl/keeks/NODfbab",
|
||||
"file": "NODfbab.mp4",
|
||||
"md5": "9b0636f8c0f7614afa4ea5e4c6e57e83",
|
||||
"info_dict": {
|
||||
"title": "test chars: \"'/\\ä<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de ."
|
||||
}
|
||||
|
||||
},
|
||||
{
|
||||
"name": "TED",
|
||||
"url": "http://www.ted.com/talks/dan_dennett_on_our_consciousness.html",
|
||||
"file": "102.mp4",
|
||||
"md5": "7bc087e71d16f18f9b8ab9fa62a8a031",
|
||||
"info_dict": {
|
||||
"title": "Dan Dennett: The illusion of consciousness",
|
||||
"thumbnail": "http://images.ted.com/images/ted/488_389x292.jpg"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "MySpass",
|
||||
"url": "http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/",
|
||||
"file": "11741.mp4",
|
||||
"md5": "0b49f4844a068f8b33f4b7c88405862b",
|
||||
"info_dict": {
|
||||
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Generic",
|
||||
"url": "http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html",
|
||||
"file": "13601338388002.mp4",
|
||||
"md5": "85b90ccc9d73b4acd9138d3af4c27f89"
|
||||
},
|
||||
{
|
||||
"name": "Spiegel",
|
||||
"url": "http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html",
|
||||
"file": "1259285.mp4",
|
||||
"md5": "2c2754212136f35fb4b19767d242f66e",
|
||||
"info_dict": {
|
||||
"title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "LiveLeak",
|
||||
"md5": "0813c2430bea7a46bf13acf3406992f4",
|
||||
"url": "http://www.liveleak.com/view?i=757_1364311680",
|
||||
"file": "757_1364311680.mp4",
|
||||
"info_dict": {
|
||||
"title": "Most unlucky car accident",
|
||||
"description": "extremely bad day for this guy..!",
|
||||
"uploader": "ljfriel2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "WorldStarHipHop",
|
||||
"url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
|
||||
"file": "wshh6a7q1ny0G34ZwuIO.mp4",
|
||||
"md5": "9d04de741161603bf7071bbf4e883186",
|
||||
"info_dict": {
|
||||
"title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick! "
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ARD",
|
||||
"url": "http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640",
|
||||
"file": "14077640.mp4",
|
||||
"md5": "6ca8824255460c787376353f9e20bbd8",
|
||||
"info_dict": {
|
||||
"title": "11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
|
||||
},
|
||||
"skip": "Requires rtmpdump"
|
||||
},
|
||||
{
|
||||
"name": "Tumblr",
|
||||
"url": "http://birthdayproject2012.tumblr.com/post/17258355236/a-sample-video-from-leeann-if-you-need-an-idea",
|
||||
"file": "17258355236.mp4",
|
||||
"md5": "7c6a514d691b034ccf8567999e9e88a3",
|
||||
"info_dict": {
|
||||
"title": "Calling all Pris! - A sample video from LeeAnn. (If you need an idea..."
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "SoundcloudSet",
|
||||
"url":"https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep",
|
||||
"playlist":[
|
||||
{
|
||||
"file":"30510138.mp3",
|
||||
"md5":"f9136bf103901728f29e419d2c70f55d",
|
||||
"info_dict": {
|
||||
"title":"D-D-Dance"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file":"47127625.mp3",
|
||||
"md5":"09b6758a018470570f8fd423c9453dd8",
|
||||
"info_dict": {
|
||||
"title":"The Royal Concept - Gimme Twice"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file":"47127627.mp3",
|
||||
"md5":"154abd4e418cea19c3b901f1e1306d9c",
|
||||
"info_dict": {
|
||||
"title":"Goldrushed"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file":"47127629.mp3",
|
||||
"md5":"2f5471edc79ad3f33a683153e96a79c1",
|
||||
"info_dict": {
|
||||
"title":"In the End"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file":"47127631.mp3",
|
||||
"md5":"f9ba87aa940af7213f98949254f1c6e2",
|
||||
"info_dict": {
|
||||
"title":"Knocked Up"
|
||||
}
|
||||
},
|
||||
{
|
||||
"file":"75206121.mp3",
|
||||
"md5":"f9d1fe9406717e302980c30de4af9353",
|
||||
"info_dict": {
|
||||
"title":"World On Fire"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name":"Bandcamp",
|
||||
"url":"http://youtube-dl.bandcamp.com/track/youtube-dl-test-song",
|
||||
"file":"1812978515.mp3",
|
||||
"md5":"cdeb30cdae1921719a3cbcab696ef53c",
|
||||
"info_dict": {
|
||||
"title":"youtube-dl test song \"'/\\ä↭"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "RedTube",
|
||||
"url": "http://www.redtube.com/66418",
|
||||
"file": "66418.mp4",
|
||||
"md5": "7b8c22b5e7098a3e1c09709df1126d2d",
|
||||
"info_dict":{
|
||||
"title":"Sucked on a toilet"
|
||||
}
|
||||
}
|
||||
]
|
||||
tox.ini (new file, 8 lines)
@@ -0,0 +1,8 @@
[tox]
envlist = py26,py27,py33
[testenv]
deps =
    nose
    coverage
commands = nosetests --verbose {posargs:test} # --with-coverage --cover-package=youtube_dl --cover-html
# test.test_download:TestDownload.test_NowVideo
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,14 +1,16 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
from .utils import *
|
||||
|
||||
from .utils import (
|
||||
compat_subprocess_get_DEVNULL,
|
||||
encodeFilename,
|
||||
PostProcessingError,
|
||||
shell_quote,
|
||||
subtitles_filename,
|
||||
)
|
||||
|
||||
|
||||
class PostProcessor(object):
|
||||
@@ -76,17 +78,28 @@ class FFmpegPostProcessor(PostProcessor):
|
||||
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
|
||||
return dict((program, executable(program)) for program in programs)
|
||||
|
||||
def run_ffmpeg(self, path, out_path, opts):
|
||||
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
|
||||
if not self._exes['ffmpeg'] and not self._exes['avconv']:
|
||||
raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
|
||||
cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path)]
|
||||
|
||||
files_cmd = []
|
||||
for path in input_paths:
|
||||
files_cmd.extend(['-i', encodeFilename(path)])
|
||||
cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y'] + files_cmd
|
||||
+ opts +
|
||||
[encodeFilename(self._ffmpeg_filename_argument(out_path))])
|
||||
|
||||
if self._downloader.params.get('verbose', False):
|
||||
self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
|
||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
stdout,stderr = p.communicate()
|
||||
if p.returncode != 0:
|
||||
stderr = stderr.decode('utf-8', 'replace')
|
||||
msg = stderr.strip().split('\n')[-1]
|
||||
raise FFmpegPostProcessorError(msg.decode('utf-8', 'replace'))
|
||||
raise FFmpegPostProcessorError(msg)
|
||||
|
||||
def run_ffmpeg(self, path, out_path, opts):
|
||||
self.run_ffmpeg_multiple_files([path], out_path, opts)
|
||||
|
||||
def _ffmpeg_filename_argument(self, fn):
|
||||
# ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
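
Stripped of executable detection and filename encoding, the command line assembled by run_ffmpeg_multiple_files reduces to a list of '-i' pairs followed by the caller's options and the output path. The sketch below assumes a plain 'ffmpeg' binary on PATH instead of the avconv/ffmpeg probing and the _ffmpeg_filename_argument handling done above.

def build_ffmpeg_cmd(input_paths, out_path, opts):
    # One '-i PATH' pair per input, then the caller's options, then the output.
    files_cmd = []
    for path in input_paths:
        files_cmd.extend(['-i', path])
    return ['ffmpeg', '-y'] + files_cmd + opts + [out_path]

print(build_ffmpeg_cmd(['video.mp4', 'video.en.srt'], 'out.mp4',
                       ['-map', '0:0', '-c', 'copy']))
# ['ffmpeg', '-y', '-i', 'video.mp4', '-i', 'video.en.srt',
#  '-map', '0:0', '-c', 'copy', 'out.mp4']
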
|
||||
@@ -104,7 +117,8 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
||||
self._nopostoverwrites = nopostoverwrites
|
||||
|
||||
def get_audio_codec(self, path):
|
||||
if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
|
||||
if not self._exes['ffprobe'] and not self._exes['avprobe']:
|
||||
raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
|
||||
try:
|
||||
cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
|
||||
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
|
||||
@@ -132,7 +146,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
||||
try:
|
||||
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
|
||||
except FFmpegPostProcessorError as err:
|
||||
raise AudioConversionError(err.message)
|
||||
raise AudioConversionError(err.msg)
|
||||
|
||||
def run(self, information):
|
||||
path = information['filepath']
|
||||
@@ -172,7 +186,8 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
||||
extension = self._preferredcodec
|
||||
more_opts = []
|
||||
if self._preferredquality is not None:
|
||||
if int(self._preferredquality) < 10:
|
||||
# The opus codec doesn't support the -aq option
|
||||
if int(self._preferredquality) < 10 and extension != 'opus':
|
||||
more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
|
||||
else:
|
||||
more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
|
||||
@@ -188,6 +203,11 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
||||
|
||||
prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
|
||||
new_path = prefix + sep + extension
|
||||
|
||||
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
|
||||
if new_path == path:
|
||||
self._nopostoverwrites = True
|
||||
|
||||
try:
|
||||
if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
|
||||
self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
|
||||
@@ -197,7 +217,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
||||
except:
|
||||
etype,e,tb = sys.exc_info()
|
||||
if isinstance(e, AudioConversionError):
|
||||
msg = u'audio conversion failed: ' + e.message
|
||||
msg = u'audio conversion failed: ' + e.msg
|
||||
else:
|
||||
msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
|
||||
raise PostProcessingError(msg)
|
||||
@@ -207,10 +227,10 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
||||
try:
|
||||
os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
|
||||
except:
|
||||
self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
|
||||
self._downloader.report_warning(u'Cannot update utime of audio file')
|
||||
|
||||
information['filepath'] = new_path
|
||||
return False,information
|
||||
return self._nopostoverwrites,information
|
||||
|
||||
class FFmpegVideoConvertor(FFmpegPostProcessor):
|
||||
def __init__(self, downloader=None,preferedformat=None):
|
||||
@@ -230,3 +250,262 @@ class FFmpegVideoConvertor(FFmpegPostProcessor):
|
||||
information['format'] = self._preferedformat
|
||||
information['ext'] = self._preferedformat
|
||||
return False,information
|
||||
|
||||
|
||||
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
|
||||
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
|
||||
_lang_map = {
|
||||
'aa': 'aar',
|
||||
'ab': 'abk',
|
||||
'ae': 'ave',
|
||||
'af': 'afr',
|
||||
'ak': 'aka',
|
||||
'am': 'amh',
|
||||
'an': 'arg',
|
||||
'ar': 'ara',
|
||||
'as': 'asm',
|
||||
'av': 'ava',
|
||||
'ay': 'aym',
|
||||
'az': 'aze',
|
||||
'ba': 'bak',
|
||||
'be': 'bel',
|
||||
'bg': 'bul',
|
||||
'bh': 'bih',
|
||||
'bi': 'bis',
|
||||
'bm': 'bam',
|
||||
'bn': 'ben',
|
||||
'bo': 'bod',
|
||||
'br': 'bre',
|
||||
'bs': 'bos',
|
||||
'ca': 'cat',
|
||||
'ce': 'che',
|
||||
'ch': 'cha',
|
||||
'co': 'cos',
|
||||
'cr': 'cre',
|
||||
'cs': 'ces',
|
||||
'cu': 'chu',
|
||||
'cv': 'chv',
|
||||
'cy': 'cym',
|
||||
'da': 'dan',
|
||||
'de': 'deu',
|
||||
'dv': 'div',
|
||||
'dz': 'dzo',
|
||||
'ee': 'ewe',
|
||||
'el': 'ell',
|
||||
'en': 'eng',
|
||||
'eo': 'epo',
|
||||
'es': 'spa',
|
||||
'et': 'est',
|
||||
'eu': 'eus',
|
||||
'fa': 'fas',
|
||||
'ff': 'ful',
|
||||
'fi': 'fin',
|
||||
'fj': 'fij',
|
||||
'fo': 'fao',
|
||||
'fr': 'fra',
|
||||
'fy': 'fry',
|
||||
'ga': 'gle',
|
||||
'gd': 'gla',
|
||||
'gl': 'glg',
|
||||
'gn': 'grn',
|
||||
'gu': 'guj',
|
||||
'gv': 'glv',
|
||||
'ha': 'hau',
|
||||
'he': 'heb',
|
||||
'hi': 'hin',
|
||||
'ho': 'hmo',
|
||||
'hr': 'hrv',
|
||||
'ht': 'hat',
|
||||
'hu': 'hun',
|
||||
'hy': 'hye',
|
||||
'hz': 'her',
|
||||
'ia': 'ina',
|
||||
'id': 'ind',
|
||||
'ie': 'ile',
|
||||
'ig': 'ibo',
|
||||
'ii': 'iii',
|
||||
'ik': 'ipk',
|
||||
'io': 'ido',
|
||||
'is': 'isl',
|
||||
'it': 'ita',
|
||||
'iu': 'iku',
|
||||
'ja': 'jpn',
|
||||
'jv': 'jav',
|
||||
'ka': 'kat',
|
||||
'kg': 'kon',
|
||||
'ki': 'kik',
|
||||
'kj': 'kua',
|
||||
'kk': 'kaz',
|
||||
'kl': 'kal',
|
||||
'km': 'khm',
|
||||
'kn': 'kan',
|
||||
'ko': 'kor',
|
||||
'kr': 'kau',
|
||||
'ks': 'kas',
|
||||
'ku': 'kur',
|
||||
'kv': 'kom',
|
||||
'kw': 'cor',
|
||||
'ky': 'kir',
|
||||
'la': 'lat',
|
||||
'lb': 'ltz',
|
||||
'lg': 'lug',
|
||||
'li': 'lim',
|
||||
'ln': 'lin',
|
||||
'lo': 'lao',
|
||||
'lt': 'lit',
|
||||
'lu': 'lub',
|
||||
'lv': 'lav',
|
||||
'mg': 'mlg',
|
||||
'mh': 'mah',
|
||||
'mi': 'mri',
|
||||
'mk': 'mkd',
|
||||
'ml': 'mal',
|
||||
'mn': 'mon',
|
||||
'mr': 'mar',
|
||||
'ms': 'msa',
|
||||
'mt': 'mlt',
|
||||
'my': 'mya',
|
||||
'na': 'nau',
|
||||
'nb': 'nob',
|
||||
'nd': 'nde',
|
||||
'ne': 'nep',
|
||||
'ng': 'ndo',
|
||||
'nl': 'nld',
|
||||
'nn': 'nno',
|
||||
'no': 'nor',
|
||||
'nr': 'nbl',
|
||||
'nv': 'nav',
|
||||
'ny': 'nya',
|
||||
'oc': 'oci',
|
||||
'oj': 'oji',
|
||||
'om': 'orm',
|
||||
'or': 'ori',
|
||||
'os': 'oss',
|
||||
'pa': 'pan',
|
||||
'pi': 'pli',
|
||||
'pl': 'pol',
|
||||
'ps': 'pus',
|
||||
'pt': 'por',
|
||||
'qu': 'que',
|
||||
'rm': 'roh',
|
||||
'rn': 'run',
|
||||
'ro': 'ron',
|
||||
'ru': 'rus',
|
||||
'rw': 'kin',
|
||||
'sa': 'san',
|
||||
'sc': 'srd',
|
||||
'sd': 'snd',
|
||||
'se': 'sme',
|
||||
'sg': 'sag',
|
||||
'si': 'sin',
|
||||
'sk': 'slk',
|
||||
'sl': 'slv',
|
||||
'sm': 'smo',
|
||||
'sn': 'sna',
|
||||
'so': 'som',
|
||||
'sq': 'sqi',
|
||||
'sr': 'srp',
|
||||
'ss': 'ssw',
|
||||
'st': 'sot',
|
||||
'su': 'sun',
|
||||
'sv': 'swe',
|
||||
'sw': 'swa',
|
||||
'ta': 'tam',
|
||||
'te': 'tel',
|
||||
'tg': 'tgk',
|
||||
'th': 'tha',
|
||||
'ti': 'tir',
|
||||
'tk': 'tuk',
|
||||
'tl': 'tgl',
|
||||
'tn': 'tsn',
|
||||
'to': 'ton',
|
||||
'tr': 'tur',
|
||||
'ts': 'tso',
|
||||
'tt': 'tat',
|
||||
'tw': 'twi',
|
||||
'ty': 'tah',
|
||||
'ug': 'uig',
|
||||
'uk': 'ukr',
|
||||
'ur': 'urd',
|
||||
'uz': 'uzb',
|
||||
've': 'ven',
|
||||
'vi': 'vie',
|
||||
'vo': 'vol',
|
||||
'wa': 'wln',
|
||||
'wo': 'wol',
|
||||
'xh': 'xho',
|
||||
'yi': 'yid',
|
||||
'yo': 'yor',
|
||||
'za': 'zha',
|
||||
'zh': 'zho',
|
||||
'zu': 'zul',
|
||||
}
|
||||
|
||||
def __init__(self, downloader=None, subtitlesformat='srt'):
|
||||
super(FFmpegEmbedSubtitlePP, self).__init__(downloader)
|
||||
self._subformat = subtitlesformat
|
||||
|
||||
@classmethod
|
||||
def _conver_lang_code(cls, code):
|
||||
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
|
||||
return cls._lang_map.get(code[:2])
|
||||
|
||||
def run(self, information):
|
||||
if information['ext'] != u'mp4':
|
||||
self._downloader.to_screen(u'[ffmpeg] Subtitles can only be embedded in mp4 files')
|
||||
return True, information
|
||||
if not information.get('subtitles'):
|
||||
self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed')
|
||||
return True, information
|
||||
|
||||
sub_langs = [key for key in information['subtitles']]
|
||||
filename = information['filepath']
|
||||
input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs]
|
||||
|
||||
opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
|
||||
for (i, lang) in enumerate(sub_langs):
|
||||
opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
|
||||
lang_code = self._conver_lang_code(lang)
|
||||
if lang_code is not None:
|
||||
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
|
||||
opts.extend(['-f', 'mp4'])
|
||||
|
||||
temp_filename = filename + u'.temp'
|
||||
self._downloader.to_screen(u'[ffmpeg] Embedding subtitles in \'%s\'' % filename)
|
||||
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
|
||||
os.remove(encodeFilename(filename))
|
||||
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
|
||||
|
||||
return True, information
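
To make the stream mapping concrete, this is the opts list the loop above would build for two subtitle tracks; the 'en'/'fr' inputs and the printed result are worked out by hand from the code, not captured from a real run.

sub_langs = ['en', 'fr']
lang_codes = {'en': 'eng', 'fr': 'fra'}  # subset of the _lang_map above
opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
for i, lang in enumerate(sub_langs):
    # Map each subtitle input (inputs 1..n) and convert it to mov_text.
    opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
    opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_codes[lang]])
opts.extend(['-f', 'mp4'])
print(opts)
# ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy',
#  '-map', '1:0', '-c:s:0', 'mov_text', '-metadata:s:s:0', 'language=eng',
#  '-map', '2:0', '-c:s:1', 'mov_text', '-metadata:s:s:1', 'language=fra',
#  '-f', 'mp4']
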
|
||||
|
||||
|
||||
class FFmpegMetadataPP(FFmpegPostProcessor):
|
||||
def run(self, info):
|
||||
metadata = {}
|
||||
if info.get('title') is not None:
|
||||
metadata['title'] = info['title']
|
||||
if info.get('upload_date') is not None:
|
||||
metadata['date'] = info['upload_date']
|
||||
if info.get('uploader') is not None:
|
||||
metadata['artist'] = info['uploader']
|
||||
elif info.get('uploader_id') is not None:
|
||||
metadata['artist'] = info['uploader_id']
|
||||
|
||||
if not metadata:
|
||||
self._downloader.to_screen(u'[ffmpeg] There isn\'t any metadata to add')
|
||||
return True, info
|
||||
|
||||
filename = info['filepath']
|
||||
ext = os.path.splitext(filename)[1][1:]
|
||||
temp_filename = filename + u'.temp'
|
||||
|
||||
options = ['-c', 'copy']
|
||||
for (name, value) in metadata.items():
|
||||
options.extend(['-metadata', '%s=%s' % (name, value)])
|
||||
options.extend(['-f', ext])
|
||||
|
||||
self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
|
||||
self.run_ffmpeg(filename, temp_filename, options)
|
||||
os.remove(encodeFilename(filename))
|
||||
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
|
||||
return True, info
|
||||
|
||||
youtube_dl/YoutubeDL.py (new file, 1009 lines; diff suppressed because it is too large)
@@ -1,9 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import with_statement
|
||||
from __future__ import absolute_import
|
||||
|
||||
__authors__ = (
|
||||
'Ricardo Garcia Gonzalez',
|
||||
'Danny Colligan',
|
||||
@@ -26,7 +23,20 @@ __authors__ = (
|
||||
'Osama Khalid',
|
||||
'Michael Walter',
|
||||
'M. Yasoob Ullah Khalid',
|
||||
)
|
||||
'Julien Fraichard',
|
||||
'Johny Mo Swag',
|
||||
'Axel Noack',
|
||||
'Albert Kim',
|
||||
'Pierre Rudloff',
|
||||
'Huarong Huo',
|
||||
'Ismael Mejía',
|
||||
'Steffan \'Ruirize\' James',
|
||||
'Andras Elso',
|
||||
'Jelle van der Waa',
|
||||
'Marcin Cieślak',
|
||||
'Anton Larionov',
|
||||
'Takuya Tsuchida',
|
||||
)
|
||||
|
||||
__license__ = 'Public Domain'
|
||||
|
||||
@@ -34,20 +44,40 @@ import codecs
|
||||
import getpass
|
||||
import optparse
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import shlex
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import warnings
|
||||
import platform
|
||||
|
||||
from .utils import *
|
||||
|
||||
from .utils import (
|
||||
compat_print,
|
||||
DateRange,
|
||||
decodeOption,
|
||||
determine_ext,
|
||||
DownloadError,
|
||||
get_cachedir,
|
||||
MaxDownloadsReached,
|
||||
preferredencoding,
|
||||
SameFileError,
|
||||
std_headers,
|
||||
write_string,
|
||||
)
|
||||
from .update import update_self
|
||||
from .FileDownloader import (
|
||||
FileDownloader,
|
||||
)
|
||||
from .extractor import gen_extractors
|
||||
from .version import __version__
|
||||
from .FileDownloader import *
|
||||
from .InfoExtractors import gen_extractors
|
||||
from .PostProcessor import *
|
||||
from .YoutubeDL import YoutubeDL
|
||||
from .PostProcessor import (
|
||||
FFmpegMetadataPP,
|
||||
FFmpegVideoConvertor,
|
||||
FFmpegExtractAudioPP,
|
||||
FFmpegEmbedSubtitlePP,
|
||||
)
|
||||
|
||||
|
||||
def parseOpts(overrideArguments=None):
|
||||
def _readOptions(filename_bytes):
|
||||
@@ -79,6 +109,9 @@ def parseOpts(overrideArguments=None):
|
||||
|
||||
return "".join(opts)
|
||||
|
||||
def _comma_separated_values_options_callback(option, opt_str, value, parser):
|
||||
setattr(parser.values, option.dest, value.split(','))
|
||||
|
||||
def _find_term_columns():
|
||||
columns = os.environ.get('COLUMNS', None)
|
||||
if columns:
|
||||
@@ -92,6 +125,16 @@ def parseOpts(overrideArguments=None):
|
||||
pass
|
||||
return None
|
||||
|
||||
def _hide_login_info(opts):
|
||||
opts = list(opts)
|
||||
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
|
||||
try:
|
||||
i = opts.index(private_opt)
|
||||
opts[i+1] = '<PRIVATE>'
|
||||
except ValueError:
|
||||
pass
|
||||
return opts
|
||||
|
||||
max_width = 80
|
||||
max_help_position = 80
|
||||
|
||||
@@ -116,6 +159,8 @@ def parseOpts(overrideArguments=None):
|
||||
selection = optparse.OptionGroup(parser, 'Video Selection')
|
||||
authentication = optparse.OptionGroup(parser, 'Authentication Options')
|
||||
video_format = optparse.OptionGroup(parser, 'Video Format Options')
|
||||
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
|
||||
downloader = optparse.OptionGroup(parser, 'Download Options')
|
||||
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
|
||||
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
|
||||
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
|
||||
@@ -125,18 +170,12 @@ def parseOpts(overrideArguments=None):
|
||||
general.add_option('-v', '--version',
|
||||
action='version', help='print program version and exit')
|
||||
general.add_option('-U', '--update',
|
||||
action='store_true', dest='update_self', help='update this program to latest version')
|
||||
action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
|
||||
general.add_option('-i', '--ignore-errors',
|
||||
action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
|
||||
general.add_option('-r', '--rate-limit',
|
||||
dest='ratelimit', metavar='LIMIT', help='maximum download rate (e.g. 50k or 44.6m)')
|
||||
general.add_option('-R', '--retries',
|
||||
dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
|
||||
general.add_option('--buffer-size',
|
||||
dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
|
||||
general.add_option('--no-resize-buffer',
|
||||
action='store_true', dest='noresizebuffer',
|
||||
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
|
||||
action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
|
||||
general.add_option('--abort-on-error',
|
||||
action='store_false', dest='ignoreerrors',
|
||||
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
|
||||
general.add_option('--dump-user-agent',
|
||||
action='store_true', dest='dump_user_agent',
|
||||
help='display the current browser identification', default=False)
|
||||
@@ -148,8 +187,18 @@ def parseOpts(overrideArguments=None):
|
||||
general.add_option('--list-extractors',
|
||||
action='store_true', dest='list_extractors',
|
||||
help='List all supported extractors and the URLs they would handle', default=False)
|
||||
general.add_option('--extractor-descriptions',
|
||||
action='store_true', dest='list_extractor_descriptions',
|
||||
help='Output descriptions of all supported extractors', default=False)
|
||||
general.add_option('--proxy', dest='proxy', default=None, help='Use the specified HTTP/HTTPS proxy', metavar='URL')
|
||||
general.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
|
||||
general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
|
||||
general.add_option(
|
||||
'--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
|
||||
help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .')
|
||||
general.add_option(
|
||||
'--no-cache-dir', action='store_const', const=None, dest='cachedir',
|
||||
help='Disable filesystem caching')
|
||||
|
||||
|
||||
selection.add_option('--playlist-start',
|
||||
dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
|
||||
@@ -157,12 +206,21 @@ def parseOpts(overrideArguments=None):
|
||||
dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
|
||||
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
|
||||
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
|
||||
selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
|
||||
selection.add_option('--max-downloads', metavar='NUMBER',
|
||||
dest='max_downloads', type=int, default=None,
|
||||
help='Abort after downloading NUMBER files')
|
||||
selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
|
||||
selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
|
||||
selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
|
||||
selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
|
||||
selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
|
||||
selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
|
||||
selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
|
||||
help='download only videos suitable for the given age',
|
||||
default=None, type=int)
|
||||
selection.add_option('--download-archive', metavar='FILE',
|
||||
dest='download_archive',
|
||||
help='Download only videos not present in the archive file. Record all downloaded videos in it.')
|
||||
|
||||
|
||||
authentication.add_option('-u', '--username',
|
||||
@@ -171,11 +229,13 @@ def parseOpts(overrideArguments=None):
|
||||
dest='password', metavar='PASSWORD', help='account password')
|
||||
authentication.add_option('-n', '--netrc',
|
||||
action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
|
||||
authentication.add_option('--video-password',
|
||||
dest='videopassword', metavar='PASSWORD', help='video password (vimeo only)')
|
||||
|
||||
|
||||
video_format.add_option('-f', '--format',
|
||||
action='store', dest='format', metavar='FORMAT',
|
||||
help='video format code, specify the order of preference using slashes: "-f 22/17/18"')
|
||||
action='store', dest='format', metavar='FORMAT', default='best',
|
||||
help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
|
||||
video_format.add_option('--all-formats',
|
||||
action='store_const', dest='format', help='download all available video formats', const='all')
|
||||
video_format.add_option('--prefer-free-formats',
|
||||
@@ -184,24 +244,37 @@ def parseOpts(overrideArguments=None):
|
||||
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
|
||||
video_format.add_option('-F', '--list-formats',
|
||||
action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
|
||||
video_format.add_option('--write-sub', '--write-srt',
|
||||
|
||||
subtitles.add_option('--write-sub', '--write-srt',
|
||||
action='store_true', dest='writesubtitles',
|
||||
help='write subtitle file (currently youtube only)', default=False)
|
||||
video_format.add_option('--only-sub',
|
||||
action='store_true', dest='onlysubtitles',
|
||||
help='downloads only the subtitles (no video)', default=False)
|
||||
video_format.add_option('--all-subs',
|
||||
help='write subtitle file', default=False)
|
||||
subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
|
||||
action='store_true', dest='writeautomaticsub',
|
||||
help='write automatic subtitle file (youtube only)', default=False)
|
||||
subtitles.add_option('--all-subs',
|
||||
action='store_true', dest='allsubtitles',
|
||||
help='downloads all the available subtitles of the video (currently youtube only)', default=False)
|
||||
video_format.add_option('--list-subs',
|
||||
help='downloads all the available subtitles of the video', default=False)
|
||||
subtitles.add_option('--list-subs',
|
||||
action='store_true', dest='listsubtitles',
|
||||
help='lists all available subtitles for the video (currently youtube only)', default=False)
|
||||
video_format.add_option('--sub-format',
|
||||
action='store', dest='subtitlesformat', metavar='LANG',
|
||||
help='subtitle format [srt/sbv] (default=srt) (currently youtube only)', default='srt')
|
||||
video_format.add_option('--sub-lang', '--srt-lang',
|
||||
action='store', dest='subtitleslang', metavar='LANG',
|
||||
help='language of the subtitles to download (optional) use IETF language tags like \'en\'')
|
||||
help='lists all available subtitles for the video', default=False)
|
||||
subtitles.add_option('--sub-format',
|
||||
action='store', dest='subtitlesformat', metavar='FORMAT',
|
||||
help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
|
||||
subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
|
||||
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
|
||||
default=[], callback=_comma_separated_values_options_callback,
|
||||
help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
|
||||
|
||||
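The new --sub-lang/--sub-langs option above stores its value through an optparse callback, _comma_separated_values_options_callback, which is referenced in this hunk but defined elsewhere in youtube_dl/__init__.py. A minimal sketch of such a callback, assuming the standard optparse callback signature (the body is an illustration, not necessarily the file's actual implementation):

def _comma_separated_values_options_callback(option, opt_str, value, parser):
    # Split a value like 'en,pt' on commas and store the list on the
    # destination attribute (opts.subtitleslangs for --sub-langs above).
    setattr(parser.values, option.dest, value.split(','))

With this in place, `youtube-dl --sub-lang en,pt URL` would yield opts.subtitleslangs == ['en', 'pt'].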
downloader.add_option('-r', '--rate-limit',
|
||||
dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
|
||||
downloader.add_option('-R', '--retries',
|
||||
dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
|
||||
downloader.add_option('--buffer-size',
|
||||
dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
|
||||
downloader.add_option('--no-resize-buffer',
|
||||
action='store_true', dest='noresizebuffer',
|
||||
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
|
||||
downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
|
||||
|
||||
verbosity.add_option('-q', '--quiet',
|
||||
action='store_true', dest='quiet', help='activates quiet mode', default=False)
|
||||
@@ -213,6 +286,8 @@ def parseOpts(overrideArguments=None):
|
||||
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
|
||||
verbosity.add_option('-e', '--get-title',
|
||||
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
|
||||
verbosity.add_option('--get-id',
|
||||
action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
|
||||
verbosity.add_option('--get-thumbnail',
|
||||
action='store_true', dest='getthumbnail',
|
||||
help='simulate, quiet but print thumbnail URL', default=False)
|
||||
@@ -225,6 +300,9 @@ def parseOpts(overrideArguments=None):
|
||||
verbosity.add_option('--get-format',
|
||||
action='store_true', dest='getformat',
|
||||
help='simulate, quiet but print output format', default=False)
|
||||
verbosity.add_option('-j', '--dump-json',
|
||||
action='store_true', dest='dumpjson',
|
||||
help='simulate, quiet but print JSON information', default=False)
|
||||
verbosity.add_option('--newline',
|
||||
action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
|
||||
verbosity.add_option('--no-progress',
|
||||
@@ -237,6 +315,13 @@ def parseOpts(overrideArguments=None):
|
||||
verbosity.add_option('--dump-intermediate-pages',
|
||||
action='store_true', dest='dump_intermediate_pages', default=False,
|
||||
help='print downloaded pages to debug problems (very verbose)')
|
||||
verbosity.add_option('--write-pages',
|
||||
action='store_true', dest='write_pages', default=False,
|
||||
help='Write downloaded pages to files in the current directory')
|
||||
verbosity.add_option('--youtube-print-sig-code',
|
||||
action='store_true', dest='youtube_print_sig_code', default=False,
|
||||
help=optparse.SUPPRESS_HELP)
|
||||
|
||||
|
||||
filesystem.add_option('-t', '--title',
|
||||
action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
|
||||
@@ -252,7 +337,10 @@ def parseOpts(overrideArguments=None):
|
||||
help=('output filename template. Use %(title)s to get the title, '
|
||||
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
|
||||
'%(autonumber)s to get an automatically incremented number, '
|
||||
'%(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), '
|
||||
'%(ext)s for the filename extension, '
|
||||
'%(format)s for the format description (like "22 - 1280x720" or "HD"),'
|
||||
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),'
|
||||
'%(upload_date)s for the upload date (YYYYMMDD), '
|
||||
'%(extractor)s for the provider (youtube, metacafe, etc), '
|
||||
'%(id)s for the video id , %(playlist)s for the playlist the video is in, '
|
||||
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
|
||||
@@ -260,7 +348,7 @@ def parseOpts(overrideArguments=None):
|
||||
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
|
||||
filesystem.add_option('--autonumber-size',
|
||||
dest='autonumber_size', metavar='NUMBER',
|
||||
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given')
|
||||
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
|
||||
filesystem.add_option('--restrict-filenames',
|
||||
action='store_true', dest='restrictfilenames',
|
||||
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
|
||||
@@ -269,7 +357,7 @@ def parseOpts(overrideArguments=None):
|
||||
filesystem.add_option('-w', '--no-overwrites',
|
||||
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
|
||||
filesystem.add_option('-c', '--continue',
|
||||
action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
|
||||
action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
|
||||
filesystem.add_option('--no-continue',
|
||||
action='store_false', dest='continue_dl',
|
||||
help='do not resume partially downloaded files (restart from beginning)')
|
||||
@@ -286,6 +374,9 @@ def parseOpts(overrideArguments=None):
|
||||
filesystem.add_option('--write-info-json',
|
||||
action='store_true', dest='writeinfojson',
|
||||
help='write video metadata to a .info.json file', default=False)
|
||||
filesystem.add_option('--write-annotations',
|
||||
action='store_true', dest='writeannotations',
|
||||
help='write video annotations to a .annotation file', default=False)
|
||||
filesystem.add_option('--write-thumbnail',
|
||||
action='store_true', dest='writethumbnail',
|
||||
help='write thumbnail image to disk', default=False)
|
||||
@@ -303,35 +394,45 @@ def parseOpts(overrideArguments=None):
|
||||
help='keeps the video file on disk after the post-processing; the video is erased by default')
|
||||
postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
|
||||
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
|
||||
postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
|
||||
help='embed subtitles in the video (only for mp4 videos)')
|
||||
postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
|
||||
help='add metadata to the files')
|
||||
|
||||
|
||||
parser.add_option_group(general)
|
||||
parser.add_option_group(selection)
|
||||
parser.add_option_group(downloader)
|
||||
parser.add_option_group(filesystem)
|
||||
parser.add_option_group(verbosity)
|
||||
parser.add_option_group(video_format)
|
||||
parser.add_option_group(subtitles)
|
||||
parser.add_option_group(authentication)
|
||||
parser.add_option_group(postproc)
|
||||
|
||||
if overrideArguments is not None:
|
||||
opts, args = parser.parse_args(overrideArguments)
|
||||
if opts.verbose:
|
||||
print(u'[debug] Override config: ' + repr(overrideArguments))
|
||||
write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
|
||||
else:
|
||||
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
|
||||
if xdg_config_home:
|
||||
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
|
||||
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
|
||||
if not os.path.isfile(userConfFile):
|
||||
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
|
||||
else:
|
||||
userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
|
||||
userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
|
||||
if not os.path.isfile(userConfFile):
|
||||
userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
|
||||
systemConf = _readOptions('/etc/youtube-dl.conf')
|
||||
userConf = _readOptions(userConfFile)
|
||||
commandLineConf = sys.argv[1:]
|
||||
commandLineConf = sys.argv[1:]
|
||||
argv = systemConf + userConf + commandLineConf
|
||||
opts, args = parser.parse_args(argv)
|
||||
if opts.verbose:
|
||||
print(u'[debug] System config: ' + repr(systemConf))
|
||||
print(u'[debug] User config: ' + repr(userConf))
|
||||
print(u'[debug] Command-line args: ' + repr(commandLineConf))
|
||||
write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
|
||||
write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
|
||||
write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
|
||||
|
||||
return parser, opts, args
|
||||
|
||||
@@ -343,30 +444,17 @@ def _real_main(argv=None):
|
||||
|
||||
parser, opts, args = parseOpts(argv)
|
||||
|
||||
# Open appropriate CookieJar
|
||||
if opts.cookiefile is None:
|
||||
jar = compat_cookiejar.CookieJar()
|
||||
else:
|
||||
try:
|
||||
jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
|
||||
if os.access(opts.cookiefile, os.R_OK):
|
||||
jar.load()
|
||||
except (IOError, OSError) as err:
|
||||
if opts.verbose:
|
||||
traceback.print_exc()
|
||||
sys.stderr.write(u'ERROR: unable to open cookie file\n')
|
||||
sys.exit(101)
|
||||
# Set user agent
|
||||
if opts.user_agent is not None:
|
||||
std_headers['User-Agent'] = opts.user_agent
|
||||
|
||||
|
||||
# Set referer
|
||||
if opts.referer is not None:
|
||||
std_headers['Referer'] = opts.referer
|
||||
|
||||
# Dump user agent
|
||||
if opts.dump_user_agent:
|
||||
print(std_headers['User-Agent'])
|
||||
compat_print(std_headers['User-Agent'])
|
||||
sys.exit(0)
|
||||
|
||||
# Batch file verification
|
||||
@@ -380,42 +468,43 @@ def _real_main(argv=None):
|
||||
batchurls = batchfd.readlines()
|
||||
batchurls = [x.strip() for x in batchurls]
|
||||
batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
|
||||
if opts.verbose:
|
||||
write_string(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n')
|
||||
except IOError:
|
||||
sys.exit(u'ERROR: batch file could not be read')
|
||||
all_urls = batchurls + args
|
||||
all_urls = [url.strip() for url in all_urls]
|
||||
|
||||
# General configuration
|
||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
|
||||
if opts.proxy:
|
||||
proxies = {'http': opts.proxy, 'https': opts.proxy}
|
||||
else:
|
||||
proxies = compat_urllib_request.getproxies()
|
||||
# Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
|
||||
if 'http' in proxies and 'https' not in proxies:
|
||||
proxies['https'] = proxies['http']
|
||||
proxy_handler = compat_urllib_request.ProxyHandler(proxies)
|
||||
https_handler = compat_urllib_request.HTTPSHandler()
|
||||
opener = compat_urllib_request.build_opener(https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
|
||||
compat_urllib_request.install_opener(opener)
|
||||
socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
|
||||
|
||||
extractors = gen_extractors()
|
||||
|
||||
if opts.list_extractors:
|
||||
for ie in extractors:
|
||||
print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
|
||||
for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
|
||||
compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
|
||||
matchedUrls = [url for url in all_urls if ie.suitable(url)]
|
||||
all_urls = [url for url in all_urls if url not in matchedUrls]
|
||||
for mu in matchedUrls:
|
||||
print(u' ' + mu)
|
||||
compat_print(u' ' + mu)
|
||||
sys.exit(0)
|
||||
if opts.list_extractor_descriptions:
|
||||
for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
|
||||
if not ie._WORKING:
|
||||
continue
|
||||
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
|
||||
if desc is False:
|
||||
continue
|
||||
if hasattr(ie, 'SEARCH_KEY'):
|
||||
_SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise')
|
||||
_COUNTS = (u'', u'5', u'10', u'all')
|
||||
desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
|
||||
compat_print(desc)
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
# Conflicting, missing and erroneous options
|
||||
if opts.usenetrc and (opts.username is not None or opts.password is not None):
|
||||
parser.error(u'using .netrc conflicts with giving username/password')
|
||||
if opts.password is not None and opts.username is None:
|
||||
parser.error(u'account username missing')
|
||||
parser.error(u' account username missing\n')
|
||||
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
|
||||
parser.error(u'using output template conflicts with using title, video ID or auto number')
|
||||
if opts.usetitle and opts.useid:
|
||||
@@ -440,7 +529,7 @@ def _real_main(argv=None):
|
||||
if opts.retries is not None:
|
||||
try:
|
||||
opts.retries = int(opts.retries)
|
||||
except (TypeError, ValueError) as err:
|
||||
except (TypeError, ValueError):
|
||||
parser.error(u'invalid retry count specified')
|
||||
if opts.buffersize is not None:
|
||||
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
|
||||
@@ -451,13 +540,13 @@ def _real_main(argv=None):
|
||||
opts.playliststart = int(opts.playliststart)
|
||||
if opts.playliststart <= 0:
|
||||
raise ValueError(u'Playlist start must be positive')
|
||||
except (TypeError, ValueError) as err:
|
||||
except (TypeError, ValueError):
|
||||
parser.error(u'invalid playlist start number specified')
|
||||
try:
|
||||
opts.playlistend = int(opts.playlistend)
|
||||
if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
|
||||
raise ValueError(u'Playlist end must be greater than playlist start')
|
||||
except (TypeError, ValueError) as err:
|
||||
except (TypeError, ValueError):
|
||||
parser.error(u'invalid playlist end number specified')
|
||||
if opts.extractaudio:
|
||||
if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
|
||||
@@ -474,6 +563,11 @@ def _real_main(argv=None):
|
||||
else:
|
||||
date = DateRange(opts.dateafter, opts.datebefore)
|
||||
|
||||
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
|
||||
# this was the old behaviour if only --all-sub was given.
|
||||
if opts.allsubtitles and (opts.writeautomaticsub == False):
|
||||
opts.writesubtitles = True
|
||||
|
||||
if sys.version_info < (3,):
|
||||
# In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
|
||||
if opts.outtmpl is not None:
|
||||
@@ -486,21 +580,27 @@ def _real_main(argv=None):
|
||||
or (opts.useid and u'%(id)s.%(ext)s')
|
||||
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
|
||||
or u'%(title)s-%(id)s.%(ext)s')
|
||||
if '%(ext)s' not in outtmpl and opts.extractaudio:
|
||||
parser.error(u'Cannot download a video and extract audio into the same'
|
||||
u' file! Use "%%(ext)s" instead of %r' %
|
||||
determine_ext(outtmpl, u''))
|
||||
|
||||
# File downloader
|
||||
fd = FileDownloader({
|
||||
ydl_opts = {
|
||||
'usenetrc': opts.usenetrc,
|
||||
'username': opts.username,
|
||||
'password': opts.password,
|
||||
'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
|
||||
'videopassword': opts.videopassword,
|
||||
'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),
|
||||
'forceurl': opts.geturl,
|
||||
'forcetitle': opts.gettitle,
|
||||
'forceid': opts.getid,
|
||||
'forcethumbnail': opts.getthumbnail,
|
||||
'forcedescription': opts.getdescription,
|
||||
'forcefilename': opts.getfilename,
|
||||
'forceformat': opts.getformat,
|
||||
'forcejson': opts.dumpjson,
|
||||
'simulate': opts.simulate,
|
||||
'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
|
||||
'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),
|
||||
'format': opts.format,
|
||||
'format_limit': opts.format_limit,
|
||||
'listformats': opts.listformats,
|
||||
@@ -518,81 +618,76 @@ def _real_main(argv=None):
|
||||
'progress_with_newline': opts.progress_with_newline,
|
||||
'playliststart': opts.playliststart,
|
||||
'playlistend': opts.playlistend,
|
||||
'noplaylist': opts.noplaylist,
|
||||
'logtostderr': opts.outtmpl == '-',
|
||||
'consoletitle': opts.consoletitle,
|
||||
'nopart': opts.nopart,
|
||||
'updatetime': opts.updatetime,
|
||||
'writedescription': opts.writedescription,
|
||||
'writeannotations': opts.writeannotations,
|
||||
'writeinfojson': opts.writeinfojson,
|
||||
'writethumbnail': opts.writethumbnail,
|
||||
'writesubtitles': opts.writesubtitles,
|
||||
'onlysubtitles': opts.onlysubtitles,
|
||||
'writeautomaticsub': opts.writeautomaticsub,
|
||||
'allsubtitles': opts.allsubtitles,
|
||||
'listsubtitles': opts.listsubtitles,
|
||||
'subtitlesformat': opts.subtitlesformat,
|
||||
'subtitleslang': opts.subtitleslang,
|
||||
'subtitleslangs': opts.subtitleslangs,
|
||||
'matchtitle': decodeOption(opts.matchtitle),
|
||||
'rejecttitle': decodeOption(opts.rejecttitle),
|
||||
'max_downloads': opts.max_downloads,
|
||||
'prefer_free_formats': opts.prefer_free_formats,
|
||||
'verbose': opts.verbose,
|
||||
'dump_intermediate_pages': opts.dump_intermediate_pages,
|
||||
'write_pages': opts.write_pages,
|
||||
'test': opts.test,
|
||||
'keepvideo': opts.keepvideo,
|
||||
'min_filesize': opts.min_filesize,
|
||||
'max_filesize': opts.max_filesize,
|
||||
'daterange': date,
|
||||
})
|
||||
'cachedir': opts.cachedir,
|
||||
'youtube_print_sig_code': opts.youtube_print_sig_code,
|
||||
'age_limit': opts.age_limit,
|
||||
'download_archive': opts.download_archive,
|
||||
'cookiefile': opts.cookiefile,
|
||||
'nocheckcertificate': opts.no_check_certificate,
|
||||
}
|
||||
|
||||
with YoutubeDL(ydl_opts) as ydl:
|
||||
ydl.print_debug_header()
|
||||
ydl.add_default_info_extractors()
|
||||
|
||||
# PostProcessors
|
||||
# Add the metadata pp first, the other pps will copy it
|
||||
if opts.addmetadata:
|
||||
ydl.add_post_processor(FFmpegMetadataPP())
|
||||
if opts.extractaudio:
|
||||
ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
|
||||
if opts.recodevideo:
|
||||
ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
|
||||
if opts.embedsubtitles:
|
||||
ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
|
||||
|
||||
# Update version
|
||||
if opts.update_self:
|
||||
update_self(ydl.to_screen, opts.verbose)
|
||||
|
||||
# Maybe do nothing
|
||||
if len(all_urls) < 1:
|
||||
if not opts.update_self:
|
||||
parser.error(u'you must provide at least one URL')
|
||||
else:
|
||||
sys.exit()
|
||||
|
||||
if opts.verbose:
|
||||
fd.to_screen(u'[debug] youtube-dl version ' + __version__)
|
||||
try:
|
||||
sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
cwd=os.path.dirname(os.path.abspath(__file__)))
|
||||
out, err = sp.communicate()
|
||||
out = out.decode().strip()
|
||||
if re.match('[0-9a-f]+', out):
|
||||
fd.to_screen(u'[debug] Git HEAD: ' + out)
|
||||
except:
|
||||
pass
|
||||
fd.to_screen(u'[debug] Python version %s - %s' %(platform.python_version(), platform.platform()))
|
||||
fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
|
||||
|
||||
for extractor in extractors:
|
||||
fd.add_info_extractor(extractor)
|
||||
|
||||
# PostProcessors
|
||||
if opts.extractaudio:
|
||||
fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
|
||||
if opts.recodevideo:
|
||||
fd.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
|
||||
|
||||
# Update version
|
||||
if opts.update_self:
|
||||
update_self(fd.to_screen, opts.verbose, sys.argv[0])
|
||||
|
||||
# Maybe do nothing
|
||||
if len(all_urls) < 1:
|
||||
if not opts.update_self:
|
||||
parser.error(u'you must provide at least one URL')
|
||||
else:
|
||||
sys.exit()
|
||||
|
||||
try:
|
||||
retcode = fd.download(all_urls)
|
||||
except MaxDownloadsReached:
|
||||
fd.to_screen(u'--max-download limit reached, aborting.')
|
||||
retcode = 101
|
||||
|
||||
# Dump cookie jar if requested
|
||||
if opts.cookiefile is not None:
|
||||
try:
|
||||
jar.save()
|
||||
except (IOError, OSError) as err:
|
||||
sys.exit(u'ERROR: unable to save cookie jar')
|
||||
retcode = ydl.download(all_urls)
|
||||
except MaxDownloadsReached:
|
||||
ydl.to_screen(u'--max-download limit reached, aborting.')
|
||||
retcode = 101
|
||||
|
||||
sys.exit(retcode)
|
||||
|
||||
|
||||
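The FileDownloader-to-YoutubeDL switch above is the heart of this hunk: options are gathered into a plain dict (ydl_opts) and handed to YoutubeDL, which is then used as a context manager. A minimal stand-alone sketch of the same pattern, assuming the package is importable as youtube_dl; the options and URL are placeholders:

from youtube_dl import YoutubeDL

ydl_opts = {
    'format': 'best',
    'outtmpl': u'%(title)s-%(id)s.%(ext)s',
    'quiet': False,
}

with YoutubeDL(ydl_opts) as ydl:
    ydl.add_default_info_extractors()
    # download() returns the retcode that _real_main passes to sys.exit()
    retcode = ydl.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])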
def main(argv=None):
|
||||
try:
|
||||
_real_main(argv)
|
||||
|
||||
youtube_dl/aes.py (new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_decrypt_text']
|
||||
|
||||
import base64
|
||||
from math import ceil
|
||||
|
||||
from .utils import bytes_to_intlist, intlist_to_bytes
|
||||
|
||||
BLOCK_SIZE_BYTES = 16
|
||||
|
||||
def aes_ctr_decrypt(data, key, counter):
|
||||
"""
|
||||
Decrypt with aes in counter mode
|
||||
|
||||
@param {int[]} data cipher
|
||||
@param {int[]} key 16/24/32-Byte cipher key
|
||||
@param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
|
||||
returns the next counter block
|
||||
@returns {int[]} decrypted data
|
||||
"""
|
||||
expanded_key = key_expansion(key)
|
||||
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
|
||||
|
||||
decrypted_data=[]
|
||||
for i in range(block_count):
|
||||
counter_block = counter.next_value()
|
||||
block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
|
||||
block += [0]*(BLOCK_SIZE_BYTES - len(block))
|
||||
|
||||
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
|
||||
decrypted_data += xor(block, cipher_counter_block)
|
||||
decrypted_data = decrypted_data[:len(data)]
|
||||
|
||||
return decrypted_data
|
||||
|
||||
def key_expansion(data):
|
||||
"""
|
||||
Generate key schedule
|
||||
|
||||
@param {int[]} data 16/24/32-Byte cipher key
|
||||
@returns {int[]} 176/208/240-Byte expanded key
|
||||
"""
|
||||
data = data[:] # copy
|
||||
rcon_iteration = 1
|
||||
key_size_bytes = len(data)
|
||||
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
|
||||
|
||||
while len(data) < expanded_key_size_bytes:
|
||||
temp = data[-4:]
|
||||
temp = key_schedule_core(temp, rcon_iteration)
|
||||
rcon_iteration += 1
|
||||
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
|
||||
|
||||
for _ in range(3):
|
||||
temp = data[-4:]
|
||||
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
|
||||
|
||||
if key_size_bytes == 32:
|
||||
temp = data[-4:]
|
||||
temp = sub_bytes(temp)
|
||||
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
|
||||
|
||||
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
|
||||
temp = data[-4:]
|
||||
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
|
||||
data = data[:expanded_key_size_bytes]
|
||||
|
||||
return data
|
||||
|
||||
def aes_encrypt(data, expanded_key):
|
||||
"""
|
||||
Encrypt one block with aes
|
||||
|
||||
@param {int[]} data 16-Byte state
|
||||
@param {int[]} expanded_key 176/208/240-Byte expanded key
|
||||
@returns {int[]} 16-Byte cipher
|
||||
"""
|
||||
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
|
||||
|
||||
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
|
||||
for i in range(1, rounds+1):
|
||||
data = sub_bytes(data)
|
||||
data = shift_rows(data)
|
||||
if i != rounds:
|
||||
data = mix_columns(data)
|
||||
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
|
||||
|
||||
return data
|
||||
|
||||
def aes_decrypt_text(data, password, key_size_bytes):
|
||||
"""
|
||||
Decrypt text
|
||||
- The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
|
||||
- The cipher key is retrieved by encrypting the first 16 Byte of 'password'
|
||||
with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
|
||||
- Mode of operation is 'counter'
|
||||
|
||||
@param {str} data Base64 encoded string
|
||||
@param {str,unicode} password Password (will be encoded with utf-8)
|
||||
@param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
|
||||
@returns {str} Decrypted data
|
||||
"""
|
||||
NONCE_LENGTH_BYTES = 8
|
||||
|
||||
data = bytes_to_intlist(base64.b64decode(data))
|
||||
password = bytes_to_intlist(password.encode('utf-8'))
|
||||
|
||||
key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
|
||||
key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
|
||||
|
||||
nonce = data[:NONCE_LENGTH_BYTES]
|
||||
cipher = data[NONCE_LENGTH_BYTES:]
|
||||
|
||||
class Counter:
|
||||
__value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
|
||||
def next_value(self):
|
||||
temp = self.__value
|
||||
self.__value = inc(self.__value)
|
||||
return temp
|
||||
|
||||
decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
|
||||
plaintext = intlist_to_bytes(decrypted_data)
|
||||
|
||||
return plaintext
|
||||
|
||||
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
|
||||
SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
|
||||
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
|
||||
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
|
||||
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
|
||||
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
|
||||
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
|
||||
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
|
||||
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
|
||||
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
|
||||
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
|
||||
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
|
||||
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
|
||||
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
|
||||
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
|
||||
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
|
||||
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
|
||||
MIX_COLUMN_MATRIX = ((2,3,1,1),
|
||||
(1,2,3,1),
|
||||
(1,1,2,3),
|
||||
(3,1,1,2))
|
||||
|
||||
def sub_bytes(data):
|
||||
return [SBOX[x] for x in data]
|
||||
|
||||
def rotate(data):
|
||||
return data[1:] + [data[0]]
|
||||
|
||||
def key_schedule_core(data, rcon_iteration):
|
||||
data = rotate(data)
|
||||
data = sub_bytes(data)
|
||||
data[0] = data[0] ^ RCON[rcon_iteration]
|
||||
|
||||
return data
|
||||
|
||||
def xor(data1, data2):
|
||||
return [x^y for x, y in zip(data1, data2)]
|
||||
|
||||
def mix_column(data):
|
||||
data_mixed = []
|
||||
for row in range(4):
|
||||
mixed = 0
|
||||
for column in range(4):
|
||||
addend = data[column]
|
||||
if MIX_COLUMN_MATRIX[row][column] in (2,3):
|
||||
addend <<= 1
|
||||
if addend > 0xff:
|
||||
addend &= 0xff
|
||||
addend ^= 0x1b
|
||||
if MIX_COLUMN_MATRIX[row][column] == 3:
|
||||
addend ^= data[column]
|
||||
mixed ^= addend & 0xff
|
||||
data_mixed.append(mixed)
|
||||
return data_mixed
|
||||
|
||||
def mix_columns(data):
|
||||
data_mixed = []
|
||||
for i in range(4):
|
||||
column = data[i*4 : (i+1)*4]
|
||||
data_mixed += mix_column(column)
|
||||
return data_mixed
|
||||
|
||||
def shift_rows(data):
|
||||
data_shifted = []
|
||||
for column in range(4):
|
||||
for row in range(4):
|
||||
data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
|
||||
return data_shifted
|
||||
|
||||
def inc(data):
|
||||
data = data[:] # copy
|
||||
for i in range(len(data)-1,-1,-1):
|
||||
if data[i] == 255:
|
||||
data[i] = 0
|
||||
else:
|
||||
data[i] = data[i] + 1
|
||||
break
|
||||
return data
|
||||
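For orientation, the helpers above are consumed through aes_decrypt_text: the password is padded (or truncated) to key_size_bytes and its first block is encrypted under itself to derive the key, the first 8 bytes of the base64-decoded input act as the CTR nonce, and the remainder is the ciphertext. A rough usage sketch with made-up input (the blob below is a placeholder, not real ciphertext):

import base64
from youtube_dl.aes import aes_decrypt_text

# Hypothetical input: an 8-byte nonce followed by the CTR-encrypted payload,
# then base64-encoded -- the layout aes_decrypt_text expects.
blob = base64.b64encode(b'\x00' * 8 + b'ciphertext bytes go here')

# key_size_bytes selects AES-128/192/256 (16, 24 or 32).
plaintext = aes_decrypt_text(blob, u'password used by the caller', 16)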
youtube_dl/extractor/__init__.py (new file, 211 lines)
@@ -0,0 +1,211 @@
|
||||
from .appletrailers import AppleTrailersIE
|
||||
from .addanime import AddAnimeIE
|
||||
from .anitube import AnitubeIE
|
||||
from .archiveorg import ArchiveOrgIE
|
||||
from .ard import ARDIE
|
||||
from .arte import (
|
||||
ArteTvIE,
|
||||
ArteTVPlus7IE,
|
||||
ArteTVCreativeIE,
|
||||
ArteTVFutureIE,
|
||||
)
|
||||
from .auengine import AUEngineIE
|
||||
from .bambuser import BambuserIE, BambuserChannelIE
|
||||
from .bandcamp import BandcampIE, BandcampAlbumIE
|
||||
from .bliptv import BlipTVIE, BlipTVUserIE
|
||||
from .bloomberg import BloombergIE
|
||||
from .breakcom import BreakIE
|
||||
from .brightcove import BrightcoveIE
|
||||
from .c56 import C56IE
|
||||
from .canalplus import CanalplusIE
|
||||
from .canalc2 import Canalc2IE
|
||||
from .cinemassacre import CinemassacreIE
|
||||
from .clipfish import ClipfishIE
|
||||
from .cnn import CNNIE
|
||||
from .collegehumor import CollegeHumorIE
|
||||
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
|
||||
from .condenast import CondeNastIE
|
||||
from .criterion import CriterionIE
|
||||
from .cspan import CSpanIE
|
||||
from .d8 import D8IE
|
||||
from .dailymotion import (
|
||||
DailymotionIE,
|
||||
DailymotionPlaylistIE,
|
||||
DailymotionUserIE,
|
||||
)
|
||||
from .daum import DaumIE
|
||||
from .depositfiles import DepositFilesIE
|
||||
from .dotsub import DotsubIE
|
||||
from .dreisat import DreiSatIE
|
||||
from .defense import DefenseGouvFrIE
|
||||
from .ebaumsworld import EbaumsWorldIE
|
||||
from .ehow import EHowIE
|
||||
from .eighttracks import EightTracksIE
|
||||
from .eitb import EitbIE
|
||||
from .escapist import EscapistIE
|
||||
from .exfm import ExfmIE
|
||||
from .extremetube import ExtremeTubeIE
|
||||
from .facebook import FacebookIE
|
||||
from .faz import FazIE
|
||||
from .fktv import (
|
||||
FKTVIE,
|
||||
FKTVPosteckeIE,
|
||||
)
|
||||
from .flickr import FlickrIE
|
||||
from .francetv import (
|
||||
PluzzIE,
|
||||
FranceTvInfoIE,
|
||||
France2IE,
|
||||
GenerationQuoiIE
|
||||
)
|
||||
from .freesound import FreesoundIE
|
||||
from .funnyordie import FunnyOrDieIE
|
||||
from .gamekings import GamekingsIE
|
||||
from .gamespot import GameSpotIE
|
||||
from .gametrailers import GametrailersIE
|
||||
from .generic import GenericIE
|
||||
from .googleplus import GooglePlusIE
|
||||
from .googlesearch import GoogleSearchIE
|
||||
from .hark import HarkIE
|
||||
from .hotnewhiphop import HotNewHipHopIE
|
||||
from .howcast import HowcastIE
|
||||
from .hypem import HypemIE
|
||||
from .ign import IGNIE, OneUPIE
|
||||
from .ina import InaIE
|
||||
from .infoq import InfoQIE
|
||||
from .instagram import InstagramIE
|
||||
from .internetvideoarchive import InternetVideoArchiveIE
|
||||
from .jeuxvideo import JeuxVideoIE
|
||||
from .jukebox import JukeboxIE
|
||||
from .justintv import JustinTVIE
|
||||
from .kankan import KankanIE
|
||||
from .keezmovies import KeezMoviesIE
|
||||
from .kickstarter import KickStarterIE
|
||||
from .keek import KeekIE
|
||||
from .liveleak import LiveLeakIE
|
||||
from .livestream import LivestreamIE, LivestreamOriginalIE
|
||||
from .metacafe import MetacafeIE
|
||||
from .metacritic import MetacriticIE
|
||||
from .mit import TechTVMITIE, MITIE
|
||||
from .mixcloud import MixcloudIE
|
||||
from .mofosex import MofosexIE
|
||||
from .mtv import MTVIE
|
||||
from .muzu import MuzuTVIE
|
||||
from .myspace import MySpaceIE
|
||||
from .myspass import MySpassIE
|
||||
from .myvideo import MyVideoIE
|
||||
from .naver import NaverIE
|
||||
from .nba import NBAIE
|
||||
from .nbc import NBCNewsIE
|
||||
from .newgrounds import NewgroundsIE
|
||||
from .nhl import NHLIE, NHLVideocenterIE
|
||||
from .niconico import NiconicoIE
|
||||
from .nowvideo import NowVideoIE
|
||||
from .ooyala import OoyalaIE
|
||||
from .orf import ORFIE
|
||||
from .pbs import PBSIE
|
||||
from .photobucket import PhotobucketIE
|
||||
from .pornhub import PornHubIE
|
||||
from .pornotube import PornotubeIE
|
||||
from .rbmaradio import RBMARadioIE
|
||||
from .redtube import RedTubeIE
|
||||
from .ringtv import RingTVIE
|
||||
from .ro220 import Ro220IE
|
||||
from .rottentomatoes import RottenTomatoesIE
|
||||
from .roxwel import RoxwelIE
|
||||
from .rtlnow import RTLnowIE
|
||||
from .rutube import RutubeIE
|
||||
from .sina import SinaIE
|
||||
from .slashdot import SlashdotIE
|
||||
from .slideshare import SlideshareIE
|
||||
from .sohu import SohuIE
|
||||
from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
|
||||
from .southparkstudios import (
|
||||
SouthParkStudiosIE,
|
||||
SouthparkDeIE,
|
||||
)
|
||||
from .space import SpaceIE
|
||||
from .spankwire import SpankwireIE
|
||||
from .spiegel import SpiegelIE
|
||||
from .stanfordoc import StanfordOpenClassroomIE
|
||||
from .statigram import StatigramIE
|
||||
from .steam import SteamIE
|
||||
from .streamcloud import StreamcloudIE
|
||||
from .sztvhu import SztvHuIE
|
||||
from .teamcoco import TeamcocoIE
|
||||
from .techtalks import TechTalksIE
|
||||
from .ted import TEDIE
|
||||
from .tf1 import TF1IE
|
||||
from .thisav import ThisAVIE
|
||||
from .toutv import TouTvIE
|
||||
from .traileraddict import TrailerAddictIE
|
||||
from .trilulilu import TriluliluIE
|
||||
from .tube8 import Tube8IE
|
||||
from .tudou import TudouIE
|
||||
from .tumblr import TumblrIE
|
||||
from .tutv import TutvIE
|
||||
from .tvp import TvpIE
|
||||
from .unistra import UnistraIE
|
||||
from .ustream import UstreamIE, UstreamChannelIE
|
||||
from .vbox7 import Vbox7IE
|
||||
from .veehd import VeeHDIE
|
||||
from .veoh import VeohIE
|
||||
from .vevo import VevoIE
|
||||
from .vice import ViceIE
|
||||
from .viddler import ViddlerIE
|
||||
from .videodetective import VideoDetectiveIE
|
||||
from .videofyme import VideofyMeIE
|
||||
from .videopremium import VideoPremiumIE
|
||||
from .vimeo import VimeoIE, VimeoChannelIE
|
||||
from .vine import VineIE
|
||||
from .viki import VikiIE
|
||||
from .vk import VKIE
|
||||
from .wat import WatIE
|
||||
from .websurg import WeBSurgIE
|
||||
from .weibo import WeiboIE
|
||||
from .wimp import WimpIE
|
||||
from .worldstarhiphop import WorldStarHipHopIE
|
||||
from .xhamster import XHamsterIE
|
||||
from .xnxx import XNXXIE
|
||||
from .xvideos import XVideosIE
|
||||
from .xtube import XTubeIE
|
||||
from .yahoo import YahooIE, YahooSearchIE
|
||||
from .youjizz import YouJizzIE
|
||||
from .youku import YoukuIE
|
||||
from .youporn import YouPornIE
|
||||
from .youtube import (
|
||||
YoutubeIE,
|
||||
YoutubePlaylistIE,
|
||||
YoutubeSearchIE,
|
||||
YoutubeSearchDateIE,
|
||||
YoutubeUserIE,
|
||||
YoutubeChannelIE,
|
||||
YoutubeShowIE,
|
||||
YoutubeSubscriptionsIE,
|
||||
YoutubeRecommendedIE,
|
||||
YoutubeTruncatedURLIE,
|
||||
YoutubeWatchLaterIE,
|
||||
YoutubeFavouritesIE,
|
||||
YoutubeHistoryIE,
|
||||
)
|
||||
from .zdf import ZDFIE
|
||||
|
||||
|
||||
_ALL_CLASSES = [
|
||||
klass
|
||||
for name, klass in globals().items()
|
||||
if name.endswith('IE') and name != 'GenericIE'
|
||||
]
|
||||
_ALL_CLASSES.append(GenericIE)
|
||||
|
||||
|
||||
def gen_extractors():
|
||||
""" Return a list of an instance of every supported extractor.
|
||||
The order does matter; the first extractor matched is the one handling the URL.
|
||||
"""
|
||||
return [klass() for klass in _ALL_CLASSES]
|
||||
|
||||
|
||||
def get_info_extractor(ie_name):
|
||||
"""Returns the info extractor class with the given ie_name"""
|
||||
return globals()[ie_name+'IE']
|
||||
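A quick illustration of how the two module-level helpers above are meant to be used (the URL is just a placeholder):

from youtube_dl.extractor import gen_extractors, get_info_extractor

url = 'http://www.youtube.com/watch?v=BaW_jenozKc'  # placeholder

# The order matters: the first extractor whose suitable() accepts the URL
# handles it, and GenericIE is appended last as the fallback.
ie = next(ie for ie in gen_extractors() if ie.suitable(url))
print(ie.IE_NAME)

# get_info_extractor() looks a class up by its IE name (without the 'IE' suffix).
YoutubeIE = get_info_extractor('Youtube')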
youtube_dl/extractor/addanime.py (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
compat_HTTPError,
|
||||
compat_str,
|
||||
compat_urllib_parse,
|
||||
compat_urllib_parse_urlparse,
|
||||
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
class AddAnimeIE(InfoExtractor):
|
||||
|
||||
_VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
|
||||
IE_NAME = u'AddAnime'
|
||||
_TEST = {
|
||||
u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
|
||||
u'file': u'24MR3YO5SAS9.mp4',
|
||||
u'md5': u'72954ea10bc979ab5e2eb288b21425a0',
|
||||
u'info_dict': {
|
||||
u"description": u"One Piece 606",
|
||||
u"title": u"One Piece 606"
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
try:
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('video_id')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
except ExtractorError as ee:
|
||||
if not isinstance(ee.cause, compat_HTTPError) or \
|
||||
ee.cause.code != 503:
|
||||
raise
|
||||
|
||||
redir_webpage = ee.cause.read().decode('utf-8')
|
||||
action = self._search_regex(
|
||||
r'<form id="challenge-form" action="([^"]+)"',
|
||||
redir_webpage, u'Redirect form')
|
||||
vc = self._search_regex(
|
||||
r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
|
||||
redir_webpage, u'redirect vc value')
|
||||
av = re.search(
|
||||
r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
|
||||
redir_webpage)
|
||||
if av is None:
|
||||
raise ExtractorError(u'Cannot find redirect math task')
|
||||
av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
|
||||
|
||||
parsed_url = compat_urllib_parse_urlparse(url)
|
||||
av_val = av_res + len(parsed_url.netloc)
|
||||
confirm_url = (
|
||||
parsed_url.scheme + u'://' + parsed_url.netloc +
|
||||
action + '?' +
|
||||
compat_urllib_parse.urlencode({
|
||||
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
|
||||
self._download_webpage(
|
||||
confirm_url, video_id,
|
||||
note=u'Confirming after redirect')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
formats = []
|
||||
for format_id in ('normal', 'hq'):
|
||||
rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
|
||||
video_url = self._search_regex(rex, webpage, u'video file URL',
|
||||
fatal=False)
|
||||
if not video_url:
|
||||
continue
|
||||
formats.append({
|
||||
'format_id': format_id,
|
||||
'url': video_url,
|
||||
})
|
||||
if not formats:
|
||||
raise ExtractorError(u'Cannot find any video format!')
|
||||
video_title = self._og_search_title(webpage)
|
||||
video_description = self._og_search_description(webpage)
|
||||
|
||||
return {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'title': video_title,
|
||||
'description': video_description
|
||||
}
|
||||
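The redirect handling above solves a small arithmetic challenge embedded in the interstitial page (a.value = A + B * C;) and submits that result plus the length of the host name as jschl_answer. A worked example with made-up numbers:

# Hypothetical challenge scraped from the redirect page: a.value = 7 + 3 * 11;
av_res = 7 + 3 * 11                       # 40
netloc = 'www.add-anime.net'
jschl_answer = av_res + len(netloc)       # 40 + 17 = 57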
youtube_dl/extractor/anitube.py (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
import re
|
||||
import xml.etree.ElementTree
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class AnitubeIE(InfoExtractor):
|
||||
IE_NAME = u'anitube.se'
|
||||
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://www.anitube.se/video/36621',
|
||||
u'md5': u'59d0eeae28ea0bc8c05e7af429998d43',
|
||||
u'file': u'36621.mp4',
|
||||
u'info_dict': {
|
||||
u'id': u'36621',
|
||||
u'ext': u'mp4',
|
||||
u'title': u'Recorder to Randoseru 01',
|
||||
},
|
||||
u'skip': u'Blocked in the US',
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
key = self._html_search_regex(r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)',
|
||||
webpage, u'key')
|
||||
|
||||
webpage_config = self._download_webpage('http://www.anitube.se/nuevo/econfig.php?key=%s' % key,
|
||||
key)
|
||||
config_xml = xml.etree.ElementTree.fromstring(webpage_config.encode('utf-8'))
|
||||
|
||||
video_title = config_xml.find('title').text
|
||||
|
||||
formats = []
|
||||
video_url = config_xml.find('file')
|
||||
if video_url is not None:
|
||||
formats.append({
|
||||
'format_id': 'sd',
|
||||
'url': video_url.text,
|
||||
})
|
||||
video_url = config_xml.find('filehd')
|
||||
if video_url is not None:
|
||||
formats.append({
|
||||
'format_id': 'hd',
|
||||
'url': video_url.text,
|
||||
})
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_title,
|
||||
'formats': formats
|
||||
}
|
||||
youtube_dl/extractor/appletrailers.py (new file, 138 lines)
@@ -0,0 +1,138 @@
|
||||
import re
|
||||
import xml.etree.ElementTree
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
compat_urlparse,
|
||||
determine_ext,
|
||||
)
|
||||
|
||||
|
||||
class AppleTrailersIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?trailers.apple.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
|
||||
_TEST = {
|
||||
u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/",
|
||||
u"playlist": [
|
||||
{
|
||||
u"file": u"manofsteel-trailer4.mov",
|
||||
u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8",
|
||||
u"info_dict": {
|
||||
u"duration": 111,
|
||||
u"title": u"Trailer 4",
|
||||
u"upload_date": u"20130523",
|
||||
u"uploader_id": u"wb",
|
||||
},
|
||||
},
|
||||
{
|
||||
u"file": u"manofsteel-trailer3.mov",
|
||||
u"md5": u"b8017b7131b721fb4e8d6f49e1df908c",
|
||||
u"info_dict": {
|
||||
u"duration": 182,
|
||||
u"title": u"Trailer 3",
|
||||
u"upload_date": u"20130417",
|
||||
u"uploader_id": u"wb",
|
||||
},
|
||||
},
|
||||
{
|
||||
u"file": u"manofsteel-trailer.mov",
|
||||
u"md5": u"d0f1e1150989b9924679b441f3404d48",
|
||||
u"info_dict": {
|
||||
u"duration": 148,
|
||||
u"title": u"Trailer",
|
||||
u"upload_date": u"20121212",
|
||||
u"uploader_id": u"wb",
|
||||
},
|
||||
},
|
||||
{
|
||||
u"file": u"manofsteel-teaser.mov",
|
||||
u"md5": u"5fe08795b943eb2e757fa95cb6def1cb",
|
||||
u"info_dict": {
|
||||
u"duration": 93,
|
||||
u"title": u"Teaser",
|
||||
u"upload_date": u"20120721",
|
||||
u"uploader_id": u"wb",
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
_JSON_RE = r'iTunes.playURL\((.*?)\);'
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
movie = mobj.group('movie')
|
||||
uploader_id = mobj.group('company')
|
||||
|
||||
playlist_url = compat_urlparse.urljoin(url, u'includes/playlists/itunes.inc')
|
||||
playlist_snippet = self._download_webpage(playlist_url, movie)
|
||||
playlist_cleaned = re.sub(r'(?s)<script[^<]*?>.*?</script>', u'', playlist_snippet)
|
||||
playlist_cleaned = re.sub(r'<img ([^<]*?)>', r'<img \1/>', playlist_cleaned)
|
||||
# The ' in the onClick attributes are not escaped, it couldn't be parsed
|
||||
# with xml.etree.ElementTree.fromstring
|
||||
# like: http://trailers.apple.com/trailers/wb/gravity/
|
||||
def _clean_json(m):
|
||||
return u'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
|
||||
playlist_cleaned = re.sub(self._JSON_RE, _clean_json, playlist_cleaned)
|
||||
playlist_html = u'<html>' + playlist_cleaned + u'</html>'
|
||||
|
||||
doc = xml.etree.ElementTree.fromstring(playlist_html)
|
||||
playlist = []
|
||||
for li in doc.findall('./div/ul/li'):
|
||||
on_click = li.find('.//a').attrib['onClick']
|
||||
trailer_info_json = self._search_regex(self._JSON_RE,
|
||||
on_click, u'trailer info')
|
||||
trailer_info = json.loads(trailer_info_json)
|
||||
title = trailer_info['title']
|
||||
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
|
||||
thumbnail = li.find('.//img').attrib['src']
|
||||
upload_date = trailer_info['posted'].replace('-', '')
|
||||
|
||||
runtime = trailer_info['runtime']
|
||||
m = re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime)
|
||||
duration = None
|
||||
if m:
|
||||
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
|
||||
|
||||
first_url = trailer_info['url']
|
||||
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
|
||||
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
|
||||
settings_json = self._download_webpage(settings_json_url, trailer_id, u'Downloading settings json')
|
||||
settings = json.loads(settings_json)
|
||||
|
||||
formats = []
|
||||
for format in settings['metadata']['sizes']:
|
||||
# The src is a file pointing to the real video file
|
||||
format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
'ext': determine_ext(format_url),
|
||||
'format': format['type'],
|
||||
'width': format['width'],
|
||||
'height': int(format['height']),
|
||||
})
|
||||
formats = sorted(formats, key=lambda f: (f['height'], f['width']))
|
||||
|
||||
info = {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'title': title,
|
||||
'duration': duration,
|
||||
'thumbnail': thumbnail,
|
||||
'upload_date': upload_date,
|
||||
'uploader_id': uploader_id,
|
||||
'user_agent': 'QuickTime compatible (youtube-dl)',
|
||||
}
|
||||
# TODO: Remove when #980 has been merged
|
||||
info['url'] = formats[-1]['url']
|
||||
info['ext'] = formats[-1]['ext']
|
||||
|
||||
playlist.append(info)
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': movie,
|
||||
'entries': playlist,
|
||||
}
|
||||
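The sizes listed in the settings JSON carry one file name per quality; the re.sub above derives the downloadable variant by prefixing the resolution suffix with an h. For example (path shortened and hypothetical):

import re

src = 'http://trailers.apple.com/.../manofsteel-trailer4_480p.mov'
print(re.sub(r'_(\d*p.mov)', r'_h\1', src))
# -> http://trailers.apple.com/.../manofsteel-trailer4_h480p.mov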
youtube_dl/extractor/archiveorg.py (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
import json
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
unified_strdate,
|
||||
)
|
||||
|
||||
|
||||
class ArchiveOrgIE(InfoExtractor):
|
||||
IE_NAME = 'archive.org'
|
||||
IE_DESC = 'archive.org videos'
|
||||
_VALID_URL = r'(?:https?://)?(?:www\.)?archive.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
|
||||
_TEST = {
|
||||
u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
|
||||
u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
|
||||
u'md5': u'8af1d4cf447933ed3c7f4871162602db',
|
||||
u'info_dict': {
|
||||
u"title": u"1968 Demo - FJCC Conference Presentation Reel #1",
|
||||
u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
|
||||
u"upload_date": u"19681210",
|
||||
u"uploader": u"SRI International"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
|
||||
json_url = url + (u'?' if u'?' in url else '&') + u'output=json'
|
||||
json_data = self._download_webpage(json_url, video_id)
|
||||
data = json.loads(json_data)
|
||||
|
||||
title = data['metadata']['title'][0]
|
||||
description = data['metadata']['description'][0]
|
||||
uploader = data['metadata']['creator'][0]
|
||||
upload_date = unified_strdate(data['metadata']['date'][0])
|
||||
|
||||
formats = [{
|
||||
'format': fdata['format'],
|
||||
'url': 'http://' + data['server'] + data['dir'] + fn,
|
||||
'file_size': int(fdata['size']),
|
||||
}
|
||||
for fn,fdata in data['files'].items()
|
||||
if 'Video' in fdata['format']]
|
||||
formats.sort(key=lambda fdata: fdata['file_size'])
|
||||
for f in formats:
|
||||
f['ext'] = determine_ext(f['url'])
|
||||
|
||||
info = {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'description': description,
|
||||
'uploader': uploader,
|
||||
'upload_date': upload_date,
|
||||
}
|
||||
thumbnail = data.get('misc', {}).get('image')
|
||||
if thumbnail:
|
||||
info['thumbnail'] = thumbnail
|
||||
|
||||
# TODO: Remove when #980 has been merged
|
||||
info.update(formats[-1])
|
||||
|
||||
return info
|
||||
youtube_dl/extractor/ard.py (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
class ARDIE(InfoExtractor):
|
||||
_VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
|
||||
_TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
|
||||
_MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
|
||||
_TEST = {
|
||||
u'url': u'http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640',
|
||||
u'file': u'14077640.mp4',
|
||||
u'md5': u'6ca8824255460c787376353f9e20bbd8',
|
||||
u'info_dict': {
|
||||
u"title": u"11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
|
||||
},
|
||||
u'skip': u'Requires rtmpdump'
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
# determine video id from url
|
||||
m = re.match(self._VALID_URL, url)
|
||||
|
||||
numid = re.search(r'documentId=([0-9]+)', url)
|
||||
if numid:
|
||||
video_id = numid.group(1)
|
||||
else:
|
||||
video_id = m.group('video_id')
|
||||
|
||||
# determine title and media streams from webpage
|
||||
html = self._download_webpage(url, video_id)
|
||||
title = re.search(self._TITLE, html).group('title')
|
||||
streams = [mo.groupdict() for mo in re.finditer(self._MEDIA_STREAM, html)]
|
||||
if not streams:
|
||||
assert '"fsk"' in html
|
||||
raise ExtractorError(u'This video is only available after 8:00 pm')
|
||||
|
||||
# choose default media type and highest quality for now
|
||||
stream = max([s for s in streams if int(s["media_type"]) == 0],
|
||||
key=lambda s: int(s["quality"]))
|
||||
|
||||
# there's two possibilities: RTMP stream or HTTP download
|
||||
info = {'id': video_id, 'title': title, 'ext': 'mp4'}
|
||||
if stream['rtmp_url']:
|
||||
self.to_screen(u'RTMP download detected')
|
||||
assert stream['video_url'].startswith('mp4:')
|
||||
info["url"] = stream["rtmp_url"]
|
||||
info["play_path"] = stream['video_url']
|
||||
else:
|
||||
assert stream["video_url"].endswith('.mp4')
|
||||
info["url"] = stream["video_url"]
|
||||
return [info]
|
||||
youtube_dl/extractor/arte.py (new file, 262 lines)
@@ -0,0 +1,262 @@
|
||||
# encoding: utf-8
import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    find_xpath_attr,
    unified_strdate,
    determine_ext,
    get_element_by_id,
    compat_str,
)

# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.

class ArteTvIE(InfoExtractor):
    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
    _LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    @classmethod
    def suitable(cls, url):
        return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL))

    # TODO implement Live Stream
    # from ..utils import compat_urllib_parse
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         url,
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         0,
    #         [
    #             (1, 'url', u'Invalid URL: %s' % url)
    #         ]
    #     )
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         next_url,
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #             '(rtmp://.*?)\'',
    #         re.DOTALL,
    #         [
    #             (1, 'path', u'could not extract video path: %s' % url),
    #             (2, 'player', u'could not extract video player: %s' % url),
    #             (3, 'url', u'could not extract video url: %s' % url)
    #         ]
    #     )
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def _real_extract(self, url):
        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, id, lang)

        mobj = re.match(self._LIVEWEB_URL, url)
        if mobj is not None:
            name = mobj.group('name')
            lang = mobj.group('lang')
            return self._extract_liveweb(url, name, lang)

        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                'ext': 'flv',
                }

    def _extract_liveweb(self, url, name, lang):
        """Extract from http://liveweb.arte.tv/"""
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
        config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
                                            video_id, u'Downloading information')
        config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
        event_doc = config_doc.find('event')
        url_node = event_doc.find('video').find('urlHd')
        if url_node is None:
            url_node = event_doc.find('urlSd')

        return {'id': video_id,
                'title': event_doc.find('name%s' % lang.capitalize()).text,
                'url': url_node.text.replace('MP4', 'mp4'),
                'ext': 'flv',
                'thumbnail': self._og_search_thumbnail(webpage),
                }


class ArteTVPlus7IE(InfoExtractor):
    IE_NAME = u'arte.tv:+7'
    _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'

    @classmethod
    def _extract_url_info(cls, url):
        mobj = re.match(cls._VALID_URL, url)
        lang = mobj.group('lang')
        # This is not a real id, it can be for example AJT for the news
        # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
        video_id = mobj.group('id')
        return video_id, lang

    def _real_extract(self, url):
        video_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, video_id)
        return self._extract_from_webpage(webpage, video_id, lang)

    def _extract_from_webpage(self, webpage, video_id, lang):
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {
            'id': player_info['VID'],
            'title': player_info['VTI'],
            'description': player_info.get('VDE'),
            'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
        }

        all_formats = player_info['VSR'].values()
        # Some formats use the m3u8 protocol
        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
        def _match_lang(f):
            if f.get('versionCode') is None:
                return True
            # Return true if that format is in the language of the url
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, all_formats)
        formats = list(formats)  # in python3 filter returns an iterator
        if not formats:
            # Some videos are only available in the 'Originalversion'
            # they aren't tagged as being in French or German
            if all(f['versionCode'] == 'VO' for f in all_formats):
                formats = all_formats
            else:
                raise ExtractorError(u'The formats list is empty')

        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
            def sort_key(f):
                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
        else:
            def sort_key(f):
                return (
                    # Sort first by quality
                    int(f.get('height', -1)),
                    int(f.get('bitrate', -1)),
                    # The original version with subtitles has lower relevance
                    re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
                    # The version with sourds/mal subtitles has also lower relevance
                    re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
                )
        formats = sorted(formats, key=sort_key)
        def _format(format_info):
            quality = ''
            height = format_info.get('height')
            if height is not None:
                quality = compat_str(height)
            bitrate = format_info.get('bitrate')
            if bitrate is not None:
                quality += '-%d' % bitrate
            if format_info.get('versionCode') is not None:
                format_id = u'%s-%s' % (quality, format_info['versionCode'])
            else:
                format_id = quality
            info = {
                'format_id': format_id,
                'format_note': format_info.get('versionLibelle'),
                'width': format_info.get('width'),
                'height': height,
            }
            if format_info['mediaType'] == u'rtmp':
                info['url'] = format_info['streamer']
                info['play_path'] = 'mp4:' + format_info['url']
                info['ext'] = 'flv'
            else:
                info['url'] = format_info['url']
                info['ext'] = determine_ext(info['url'])
            return info
        info_dict['formats'] = [_format(f) for f in formats]

        return info_dict


# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
    IE_NAME = u'arte.tv:creative'
    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'

    _TEST = {
        u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
        u'file': u'050489-002.mp4',
        u'info_dict': {
            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
        },
    }


class ArteTVFutureIE(ArteTVPlus7IE):
    IE_NAME = u'arte.tv:future'
    _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'

    _TEST = {
        u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
        u'file': u'050940-003.mp4',
        u'info_dict': {
            u'title': u'Les champignons au secours de la planète',
        },
    }

    def _real_extract(self, url):
        anchor_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, anchor_id)
        row = get_element_by_id(anchor_id, webpage)
        return self._extract_from_webpage(row, anchor_id, lang)
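# Standalone sketch, not part of arte.py: how the _match_lang regexes above behave for a
# French page URL (lang == 'fr', so l == 'F'). The version codes are hypothetical but
# follow the VF / VOF-STF pattern the extractor accepts.
import re

regexes = [r'VO?F', r'VO?.-STF']   # built from r'VO?%s' and r'VO?.-ST%s' with l = 'F'
for code in ['VF', 'VOF-STF', 'VA', 'VO-STA']:
    print(code, any(re.match(r, code) for r in regexes))
# prints: VF True, VOF-STF True, VA False, VO-STA False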
49
youtube_dl/extractor/auengine.py
Normal file
@@ -0,0 +1,49 @@
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    determine_ext,
    ExtractorError,
)

class AUEngineIE(InfoExtractor):
    _TEST = {
        u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
        u'file': u'lfvlytY6.mp4',
        u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f',
        u'info_dict': {
            u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]"
        }
    }
    _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed.php\?.*?file=([^&]+).*?'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
                                        webpage, u'title')
        title = title.strip()
        links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
        links = map(compat_urllib_parse.unquote, links)

        thumbnail = None
        video_url = None
        for link in links:
            if link.endswith('.png'):
                thumbnail = link
            elif '/videos/' in link:
                video_url = link
        if not video_url:
            raise ExtractorError(u'Could not find video URL')
        ext = u'.' + determine_ext(video_url)
        if ext == title[-len(ext):]:
            title = title[:-len(ext)]

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
        }
86
youtube_dl/extractor/bambuser.py
Normal file
@@ -0,0 +1,86 @@
import re
import json
import itertools

from .common import InfoExtractor
from ..utils import (
    compat_urllib_request,
)


class BambuserIE(InfoExtractor):
    IE_NAME = u'bambuser'
    _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
    _API_KEY = '005f64509e19a868399060af746a00aa'

    _TEST = {
        u'url': u'http://bambuser.com/v/4050584',
        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
        #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',
        u'info_dict': {
            u'id': u'4050584',
            u'ext': u'flv',
            u'title': u'Education engineering days - lightning talks',
            u'duration': 3741,
            u'uploader': u'pixelversity',
            u'uploader_id': u'344706',
        },
        u'params': {
            # It doesn't respect the 'Range' header, it would download the whole video
            # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
        info_json = self._download_webpage(info_url, video_id)
        info = json.loads(info_json)['result']

        return {
            'id': video_id,
            'title': info['title'],
            'url': info['url'],
            'thumbnail': info.get('preview'),
            'duration': int(info['length']),
            'view_count': int(info['views_total']),
            'uploader': info['username'],
            'uploader_id': info['uid'],
        }


class BambuserChannelIE(InfoExtractor):
    IE_NAME = u'bambuser:channel'
    _VALID_URL = r'http://bambuser.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
    # The maximum number we can get with each request
    _STEP = 50

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        urls = []
        last_id = ''
        for i in itertools.count(1):
            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
                       '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                       '&method=broadcast&format=json&vid_older_than={last}'
                       ).format(user=user, count=self._STEP, last=last_id)
            req = compat_urllib_request.Request(req_url)
            # Without setting this header, we wouldn't get any result
            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
            info_json = self._download_webpage(req, user,
                                               u'Downloading page %d' % i)
            results = json.loads(info_json)['result']
            if len(results) == 0:
                break
            last_id = results[-1]['vid']
            urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)

        return {
            '_type': 'playlist',
            'title': user,
            'entries': urls,
        }
129
youtube_dl/extractor/bandcamp.py
Normal file
@@ -0,0 +1,129 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urlparse,
    ExtractorError,
)


class BandcampIE(InfoExtractor):
    IE_NAME = u'Bandcamp'
    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
    _TESTS = [{
        u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        u'file': u'1812978515.mp3',
        u'md5': u'cdeb30cdae1921719a3cbcab696ef53c',
        u'info_dict': {
            u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad"
        },
        u'skip': u'There is a limit of 200 free downloads / month for the test song'
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        # We get the link to the free download page
        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
        if m_download is None:
            m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
            if m_trackinfo:
                json_code = m_trackinfo.group(1)
                data = json.loads(json_code)

                for d in data:
                    formats = [{
                        'format_id': format_id,
                        'url': format_url,
                        'ext': format_id.partition('-')[0]
                    } for format_id, format_url in sorted(d['file'].items())]
                    return {
                        'id': compat_str(d['id']),
                        'title': d['title'],
                        'formats': formats,
                    }
            else:
                raise ExtractorError(u'No free songs found')

        download_link = m_download.group(1)
        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
                       webpage, re.MULTILINE | re.DOTALL).group('id')

        download_webpage = self._download_webpage(download_link, id,
                                                  'Downloading free downloads page')
        # We get the dictionary of the track from some javascript code
        info = re.search(r'items: (.*?),$',
                         download_webpage, re.MULTILINE).group(1)
        info = json.loads(info)[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
        mp3_info = info[u'downloads'][u'mp3-320']
        # If we try to use this url it says the link has expired
        initial_url = mp3_info[u'url']
        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
        m_url = re.match(re_url, initial_url)
        # We build the url we will use to get the final track url
        # This url is built in Bandcamp in the script download_bunde_*.js
        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
        # If we could correctly generate the .rand field the url would be
        # in the "download_url" key
        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

        track_info = {'id': id,
                      'title': info[u'title'],
                      'ext': 'mp3',
                      'url': final_url,
                      'thumbnail': info[u'thumb_url'],
                      'uploader': info[u'artist']
                      }

        return [track_info]
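# Worked example, not from the file, all values invented: the URL rewriting BandcampIE
# performs above takes server, fsig and ts from the expired download URL and recombines
# them into the statdownload request with the fixed .rand / .vrs fields.
#
#     initial_url = 'http://popplers5.bandcamp.com/download/track?enc=mp3-320&fsig=0123abcd&id=1812978515&ts=1386000000.0'
#     # re_url captures server='http://popplers5.bandcamp.com', fsig='0123abcd', ts='1386000000.0',
#     # so request_url becomes:
#     # http://popplers5.bandcamp.com/statdownload/track?enc=mp3-320&fsig=0123abcd&id=1812978515&ts=1386000000.0&.rand=665028774616&.vrs=1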


class BandcampAlbumIE(InfoExtractor):
    IE_NAME = u'Bandcamp:album'
    _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)'

    _TEST = {
        u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        u'playlist': [
            {
                u'file': u'1353101989.mp3',
                u'md5': u'39bc1eded3476e927c724321ddf116cf',
                u'info_dict': {
                    u'title': u'Intro',
                }
            },
            {
                u'file': u'38097443.mp3',
                u'md5': u'1a2c32e2691474643e912cc6cd4bffaa',
                u'info_dict': {
                    u'title': u'Kero One - Keep It Alive (Blazo remix)',
                }
            },
        ],
        u'params': {
            u'playlistend': 2
        },
        u'skip': u'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
        if not tracks_paths:
            raise ExtractorError(u'The page doesn\'t contain any track')
        entries = [
            self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
            for t_path in tracks_paths]
        title = self._search_regex(r'album_title : "(.*?)"', webpage, u'title')
        return {
            '_type': 'playlist',
            'title': title,
            'entries': entries,
        }
193
youtube_dl/extractor/bliptv.py
Normal file
@@ -0,0 +1,193 @@
import datetime
import json
import os
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse_urlparse,
    compat_urllib_request,

    ExtractorError,
    unescapeHTML,
)


class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv"""

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'
    _TEST = {
        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
        u'file': u'5779306.m4v',
        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
        u'info_dict': {
            u"upload_date": u"20111205",
            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
            u"uploader": u"Comic Book Resources - CBR TV",
            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
        }
    }

    def report_direct_download(self, title):
        """Report information extraction."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # See https://github.com/rg3/youtube-dl/issues/857
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
        urlp = compat_urllib_parse_urlparse(url)
        if urlp.path.startswith('/play/'):
            request = compat_urllib_request.Request(url)
            response = compat_urllib_request.urlopen(request)
            redirecturl = response.geturl()
            rurlp = compat_urllib_parse_urlparse(redirecturl)
            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
            url = 'http://blip.tv/a/a-' + file_id
            return self._real_extract(url)


        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        info = None
        try:
            urlh = compat_urllib_request.urlopen(request)
            if urlh.headers.get('Content-Type', '').startswith('video/'):  # Direct download
                basename = url.split('/')[-1]
                title, ext = os.path.splitext(basename)
                title = title.decode('UTF-8')
                ext = ext.replace('.', '')
                self.report_direct_download(title)
                info = {
                    'id': title,
                    'url': url,
                    'uploader': None,
                    'upload_date': None,
                    'title': title,
                    'ext': ext,
                    'urlhandle': urlh
                }
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
        if info is None:  # Regular URL
            try:
                json_code_bytes = urlh.read()
                json_code = json_code_bytes.decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))

            try:
                json_data = json.loads(json_code)
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data

                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                if 'additionalMedia' in data:
                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
                    best_format = formats[-1]
                    video_url = best_format['url']
                else:
                    video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)

                info = {
                    'id': compat_str(data['item_id']),
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError, KeyError) as err:
                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))

        return [info]


class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users."""

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        page_base = page_base % mobj.group(1)


        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                                          u'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(r'href="/([^"]+)"', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]
27
youtube_dl/extractor/bloomberg.py
Normal file
@@ -0,0 +1,27 @@
import re

from .common import InfoExtractor


class BloombergIE(InfoExtractor):
    _VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<name>.+?).html'

    _TEST = {
        u'url': u'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
        u'file': u'12bzhqZTqQHmmlA8I-i0NpzJgcG5NNYX.mp4',
        u'info_dict': {
            u'title': u'Shah\'s Presentation on Foreign-Exchange Strategies',
            u'description': u'md5:abc86e5236f9f0e4866c59ad36736686',
        },
        u'params': {
            # Requires ffmpeg (m3u8 manifest)
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        webpage = self._download_webpage(url, name)
        ooyala_url = self._og_search_video_url(webpage)
        return self.url_result(ooyala_url, ie='Ooyala')
38
youtube_dl/extractor/breakcom.py
Normal file
@@ -0,0 +1,38 @@
import re
import json

from .common import InfoExtractor
from ..utils import determine_ext


class BreakIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
    _TEST = {
        u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
        u'file': u'2468056.mp4',
        u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
        u'info_dict': {
            u"title": u"When Girls Act Like D-Bags"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1).split("-")[-1]
        embed_url = 'http://www.break.com/embed/%s' % video_id
        webpage = self._download_webpage(embed_url, video_id)
        info_json = self._search_regex(r'var embedVars = ({.*?});', webpage,
                                       u'info json', flags=re.DOTALL)
        info = json.loads(info_json)
        video_url = info['videoUri']
        m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url)
        if m_youtube is not None:
            return self.url_result(m_youtube.group(1), 'Youtube')
        final_url = video_url + '?' + info['AuthToken']
        return [{
            'id': video_id,
            'url': final_url,
            'ext': determine_ext(final_url),
            'title': info['contentName'],
            'thumbnail': info['thumbUri'],
        }]
177
youtube_dl/extractor/brightcove.py
Normal file
@@ -0,0 +1,177 @@
# encoding: utf-8

import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    find_xpath_attr,
    compat_urlparse,
    compat_str,
    compat_urllib_request,

    ExtractorError,
)


class BrightcoveIE(InfoExtractor):
    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
    _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
    _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s'

    _TESTS = [
        {
            # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
            u'file': u'2371591881001.mp4',
            u'md5': u'8eccab865181d29ec2958f32a6a754f5',
            u'note': u'Test Brightcove downloads and detection in GenericIE',
            u'info_dict': {
                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
                u'uploader': u'8TV',
                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
            }
        },
        {
            # From http://medianetwork.oracle.com/video/player/1785452137001
            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
            u'file': u'1785452137001.flv',
            u'info_dict': {
                u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
                u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.',
                u'uploader': u'Oracle',
            },
        },
        {
            # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
            u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
            u'info_dict': {
                u'id': u'2750934548001',
                u'ext': u'mp4',
                u'title': u'This Bracelet Acts as a Personal Thermostat',
                u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0',
                u'uploader': u'Mashable',
            },
        },
    ]

    @classmethod
    def _build_brighcove_url(cls, object_str):
        """
        Build a Brightcove url from a xml string containing
        <object class="BrightcoveExperience">{params}</object>
        """

        # Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
        object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                            lambda m: m.group(1) + '/>', object_str)
        # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
        object_str = object_str.replace(u'<--', u'<!--')

        object_doc = xml.etree.ElementTree.fromstring(object_str)
        assert u'BrightcoveExperience' in object_doc.attrib['class']
        params = {'flashID': object_doc.attrib['id'],
                  'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
                  }
        def find_param(name):
            node = find_xpath_attr(object_doc, './param', 'name', name)
            if node is not None:
                return node.attrib['value']
            return None
        playerKey = find_param('playerKey')
        # Not all pages define this value
        if playerKey is not None:
            params['playerKey'] = playerKey
        # The three fields hold the id of the video
        videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
        if videoPlayer is not None:
            params['@videoPlayer'] = videoPlayer
        linkBase = find_param('linkBaseURL')
        if linkBase is not None:
            params['linkBaseURL'] = linkBase
        data = compat_urllib_parse.urlencode(params)
        return cls._FEDERATED_URL_TEMPLATE % data

    @classmethod
    def _extract_brightcove_url(cls, webpage):
        """Try to extract the brightcove url from the webpage, returns None
        if it can't be found
        """
        m_brightcove = re.search(
            r'<object[^>]+?class=([\'"])[^>]*?BrightcoveExperience.*?\1.+?</object>',
            webpage, re.DOTALL)
        if m_brightcove is not None:
            return cls._build_brighcove_url(m_brightcove.group())
        else:
            return None

    def _real_extract(self, url):
        # Change the 'videoId' and others field to '@videoPlayer'
        url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
        # Change bckey (used by bcove.me urls) to playerKey
        url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
        mobj = re.match(self._VALID_URL, url)
        query_str = mobj.group('query')
        query = compat_urlparse.parse_qs(query_str)

        videoPlayer = query.get('@videoPlayer')
        if videoPlayer:
            return self._get_video_info(videoPlayer[0], query_str, query)
        else:
            player_key = query['playerKey']
            return self._get_playlist_info(player_key[0])

    def _get_video_info(self, video_id, query_str, query):
        request_url = self._FEDERATED_URL_TEMPLATE % query_str
        req = compat_urllib_request.Request(request_url)
        linkBase = query.get('linkBaseURL')
        if linkBase is not None:
            req.add_header('Referer', linkBase[0])
        webpage = self._download_webpage(req, video_id)

        self.report_extraction(video_id)
        info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
        info = json.loads(info)['data']
        video_info = info['programmedContent']['videoPlayer']['mediaDTO']

        return self._extract_video_info(video_info)

    def _get_playlist_info(self, player_key):
        playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
                                               player_key, u'Downloading playlist information')

        json_data = json.loads(playlist_info)
        if 'videoList' not in json_data:
            raise ExtractorError(u'Empty playlist')
        playlist_info = json_data['videoList']
        videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]

        return self.playlist_result(videos, playlist_id=playlist_info['id'],
                                    playlist_title=playlist_info['mediaCollectionDTO']['displayName'])

    def _extract_video_info(self, video_info):
        info = {
            'id': compat_str(video_info['id']),
            'title': video_info['displayName'],
            'description': video_info.get('shortDescription'),
            'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
            'uploader': video_info.get('publisherName'),
        }

        renditions = video_info.get('renditions')
        if renditions:
            renditions = sorted(renditions, key=lambda r: r['size'])
            info['formats'] = [{
                'url': rend['defaultURL'],
                'height': rend.get('frameHeight'),
                'width': rend.get('frameWidth'),
            } for rend in renditions]
        elif video_info.get('FLVFullLengthURL') is not None:
            info.update({
                'url': video_info['FLVFullLengthURL'],
            })
        else:
            raise ExtractorError(u'Unable to extract video url for %s' % info['id'])
        return info
36
youtube_dl/extractor/c56.py
Normal file
@@ -0,0 +1,36 @@
# coding: utf-8

import re
import json

from .common import InfoExtractor
from ..utils import determine_ext

class C56IE(InfoExtractor):
    _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)'
    IE_NAME = u'56.com'

    _TEST = {
        u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html',
        u'file': u'93440716.flv',
        u'md5': u'e59995ac63d0457783ea05f93f12a866',
        u'info_dict': {
            u'title': u'网事知多少 第32期:车怒',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
        text_id = mobj.group('textid')
        info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id,
                                           text_id, u'Downloading video info')
        info = json.loads(info_page)['info']
        best_format = sorted(info['rfiles'], key=lambda f: int(f['filesize']))[-1]
        video_url = best_format['url']

        return {'id': info['vid'],
                'title': info['Subject'],
                'url': video_url,
                'ext': determine_ext(video_url),
                'thumbnail': info.get('bimg') or info.get('img'),
                }
37
youtube_dl/extractor/canalc2.py
Normal file
@@ -0,0 +1,37 @@
# coding: utf-8
import re

from .common import InfoExtractor


class Canalc2IE(InfoExtractor):
    IE_NAME = 'canalc2.tv'
    _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'

    _TEST = {
        u'url': u'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
        u'file': u'12163.mp4',
        u'md5': u'060158428b650f896c542dfbb3d6487f',
        u'info_dict': {
            u'title': u'Terrasses du Numérique'
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        # We need to set the voir field for getting the file name
        url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
        webpage = self._download_webpage(url, video_id)
        file_name = self._search_regex(
            r"so\.addVariable\('file','(.*?)'\);",
            webpage, 'file name')
        video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name

        title = self._html_search_regex(
            r'class="evenement8">(.*?)</a>', webpage, u'title')

        return {'id': video_id,
                'ext': 'mp4',
                'url': video_url,
                'title': title,
                }
55
youtube_dl/extractor/canalplus.py
Normal file
@@ -0,0 +1,55 @@
# encoding: utf-8
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import unified_strdate


class CanalplusIE(InfoExtractor):
    _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
    IE_NAME = u'canalplus.fr'

    _TEST = {
        u'url': u'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
        u'file': u'922470.flv',
        u'info_dict': {
            u'title': u'Zapping - 26/08/13',
            u'description': u'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
            u'upload_date': u'20130826',
        },
        u'params': {
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.groupdict().get('id')
        if video_id is None:
            webpage = self._download_webpage(url, mobj.group('path'))
            video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')
        info_url = self._VIDEO_INFO_TEMPLATE % video_id
        info_page = self._download_webpage(info_url, video_id,
                                           u'Downloading video info')

        self.report_extraction(video_id)
        doc = xml.etree.ElementTree.fromstring(info_page.encode('utf-8'))
        video_info = [video for video in doc if video.find('ID').text == video_id][0]
        infos = video_info.find('INFOS')
        media = video_info.find('MEDIA')
        formats = [media.find('VIDEOS/%s' % format)
                   for format in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
        video_url = [format.text for format in formats if format is not None][-1]

        return {'id': video_id,
                'title': u'%s - %s' % (infos.find('TITRAGE/TITRE').text,
                                       infos.find('TITRAGE/SOUS_TITRE').text),
                'url': video_url,
                'ext': 'flv',
                'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
                'thumbnail': media.find('IMAGES/GRAND').text,
                'description': infos.find('DESCRIPTION').text,
                'view_count': int(infos.find('NB_VUES').text),
                }
84
youtube_dl/extractor/cinemassacre.py
Normal file
@@ -0,0 +1,84 @@
# encoding: utf-8
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class CinemassacreIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?(?P<url>cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/.+?)(?:[/?].*)?'
    _TESTS = [{
        u'url': u'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
        u'file': u'19911.flv',
        u'md5': u'f9bb7ede54d1229c9846e197b4737e06',
        u'info_dict': {
            u'upload_date': u'20121110',
            u'title': u'“Angry Video Game Nerd: The Movie” – Trailer',
            u'description': u'md5:fb87405fcb42a331742a0dce2708560b',
        }
    },
    {
        u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
        u'file': u'521be8ef82b16.flv',
        u'md5': u'9509ee44dcaa7c1068604817c19a9e50',
        u'info_dict': {
            u'upload_date': u'20131002',
            u'title': u'The Mummy’s Hand (1940)',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        webpage_url = u'http://' + mobj.group('url')
        webpage = self._download_webpage(webpage_url, None)  # Don't know video id yet
        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
        if not mobj:
            raise ExtractorError(u'Can\'t extract embed url and video id')
        playerdata_url = mobj.group(u'embed_url')
        video_id = mobj.group(u'video_id')

        video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|',
            webpage, u'title')
        video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>',
            webpage, u'description', flags=re.DOTALL, fatal=False)
        if len(video_description) == 0:
            video_description = None

        playerdata = self._download_webpage(playerdata_url, video_id)
        url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url')

        sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file')
        hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file')
        video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False)

        formats = [
            {
                'url': url,
                'play_path': 'mp4:' + sd_file,
                'rtmp_live': True,  # workaround
                'ext': 'flv',
                'format': 'sd',
                'format_id': 'sd',
            },
            {
                'url': url,
                'play_path': 'mp4:' + hd_file,
                'rtmp_live': True,  # workaround
                'ext': 'flv',
                'format': 'hd',
                'format_id': 'hd',
            },
        ]

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'description': video_description,
            'upload_date': video_date,
            'thumbnail': video_thumbnail,
        }
53
youtube_dl/extractor/clipfish.py
Normal file
@@ -0,0 +1,53 @@
import re
import time
import xml.etree.ElementTree

from .common import InfoExtractor


class ClipfishIE(InfoExtractor):
    IE_NAME = u'clipfish'

    _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
    _TEST = {
        u'url': u'http://www.clipfish.de/special/supertalent/video/4028320/supertalent-2013-ivana-opacak-singt-nobodys-perfect/',
        u'file': u'4028320.f4v',
        u'md5': u'5e38bda8c329fbfb42be0386a3f5a382',
        u'info_dict': {
            u'title': u'Supertalent 2013: Ivana Opacak singt Nobody\'s Perfect',
            u'duration': 399,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)

        info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                    (video_id, int(time.time())))
        info_xml = self._download_webpage(
            info_url, video_id, note=u'Downloading info page')
        doc = xml.etree.ElementTree.fromstring(info_xml)
        title = doc.find('title').text
        video_url = doc.find('filename').text
        thumbnail = doc.find('imageurl').text
        duration_str = doc.find('duration').text
        m = re.match(
            r'^(?P<hours>[0-9]+):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2}):(?P<ms>[0-9]*)$',
            duration_str)
        if m:
            duration = (
                (int(m.group('hours')) * 60 * 60) +
                (int(m.group('minutes')) * 60) +
                (int(m.group('seconds')))
            )
        else:
            duration = None

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'duration': duration,
        }
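# For illustration, not part of the file: the <duration> string in the clipfish info XML
# is hours:minutes:seconds:ms, so the 399-second duration in the test above would come
# from a value like '00:06:39:00' (hypothetical split shown here).
hours, minutes, seconds = 0, 6, 39
assert hours * 60 * 60 + minutes * 60 + seconds == 399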
58
youtube_dl/extractor/cnn.py
Normal file
@@ -0,0 +1,58 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import determine_ext


class CNNIE(InfoExtractor):
    _VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''

    _TESTS = [{
        u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
        u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
        u'md5': u'3e6121ea48df7e2259fe73a0628605c4',
        u'info_dict': {
            u'title': u'Nadal wins 8th French Open title',
            u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
        },
    },
    {
        u"url": u"http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
        u"file": u"us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
        u"md5": u"b5cc60c60a3477d185af8f19a2a26f4e",
        u"info_dict": {
            u"title": "Student's epic speech stuns new freshmen",
            u"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\""
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        path = mobj.group('path')
        page_title = mobj.group('title')
        info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path
        info_xml = self._download_webpage(info_url, page_title)
        info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8'))

        formats = []
        for f in info.findall('files/file'):
            mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?', f.attrib['bitrate'])
            if mf is not None:
                formats.append((int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0), f.text))
        formats = sorted(formats)
        (_, _, _, video_path) = formats[-1]
        video_url = 'http://ht.cdn.turner.com/cnn/big%s' % video_path

        thumbnails = sorted([((int(t.attrib['height']), int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
        thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]

        return {'id': info.attrib['id'],
                'title': info.find('headline').text,
                'url': video_url,
                'ext': determine_ext(video_url),
                'thumbnail': thumbnails[-1][1],
                'thumbnails': thumbs_dict,
                'description': info.find('description').text,
                }
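# Sketch, not from the file: the 'bitrate' attribute CNNIE parses appears to follow a
# '<width>x<height>_<kbps>k' pattern, so a hypothetical '640x360_1200k' would yield the
# numeric tuple used for sorting above.
import re

mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?', '640x360_1200k')
print(int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0))   # 640 360 1200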
82
youtube_dl/extractor/collegehumor.py
Normal file
@@ -0,0 +1,82 @@
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse_urlparse,
    determine_ext,

    ExtractorError,
)


class CollegeHumorIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'

    _TESTS = [{
        u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
        u'file': u'6902724.mp4',
        u'md5': u'1264c12ad95dca142a9f0bf7968105a0',
        u'info_dict': {
            u'title': u'Comic-Con Cosplay Catastrophe',
            u'description': u'Fans get creative this year at San Diego. Too creative. And yes, that\'s really Joss Whedon.',
        },
    },
    {
        u'url': u'http://www.collegehumor.com/video/3505939/font-conference',
        u'file': u'3505939.mp4',
        u'md5': u'c51ca16b82bb456a4397987791a835f5',
        u'info_dict': {
            u'title': u'Font Conference',
            u'description': u'This video wasn\'t long enough, so we made it double-spaced.',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('videoid')

        info = {
            'id': video_id,
            'uploader': None,
            'upload_date': None,
        }

        self.report_extraction(video_id)
        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
        mdoc = self._download_xml(xmlUrl, video_id,
                                  u'Downloading info XML',
                                  u'Unable to download video info XML')

        try:
            videoNode = mdoc.findall('./video')[0]
            youtubeIdNode = videoNode.find('./youtubeID')
            if youtubeIdNode is not None:
                return self.url_result(youtubeIdNode.text, 'Youtube')
            info['description'] = videoNode.findall('./description')[0].text
            info['title'] = videoNode.findall('./caption')[0].text
            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
            next_url = videoNode.findall('./file')[0].text
        except IndexError:
            raise ExtractorError(u'Invalid metadata XML file')

        if next_url.endswith(u'manifest.f4m'):
            manifest_url = next_url + '?hdcore=2.10.3'
            adoc = self._download_xml(manifest_url, video_id,
                                      u'Downloading XML manifest',
                                      u'Unable to download video info XML')

            try:
                video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
            except IndexError:
                raise ExtractorError(u'Invalid manifest file')
            url_pr = compat_urllib_parse_urlparse(info['thumbnail'])
            info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','')
            info['ext'] = 'mp4'
        else:
            # Old-style direct links
            info['url'] = next_url
            info['ext'] = determine_ext(info['url'])

        return info
218
youtube_dl/extractor/comedycentral.py
Normal file
@@ -0,0 +1,218 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from .mtv import MTVIE, _media_xml_tag
from ..utils import (
    compat_str,
    compat_urllib_parse,

    ExtractorError,
    unified_strdate,
)


class ComedyCentralIE(MTVIE):
    _VALID_URL = r'http://www.comedycentral.com/(video-clips|episodes|cc-studios)/(?P<title>.*)'
    _FEED_URL = u'http://comedycentral.com/feeds/mrss/'

    _TEST = {
        u'url': u'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
        u'md5': u'4167875aae411f903b751a21f357f1ee',
        u'info_dict': {
            u'id': u'cef0cbb3-e776-4bc9-b62e-8016deccb354',
            u'ext': u'mp4',
            u'title': u'Uncensored - Greg Fitzsimmons - Too Good of a Mother',
            u'description': u'After a certain point, breastfeeding becomes c**kblocking.',
        },
    }
    # Overwrite MTVIE properties we don't want
    _TESTS = []

    def _get_thumbnail_url(self, uri, itemdoc):
        search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
        return itemdoc.find(search_path).attrib['url']

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        mgid = self._search_regex(r'data-mgid="(?P<mgid>mgid:.*?)"',
                                  webpage, u'mgid')
        return self._get_videos_info(mgid)


class ComedyCentralShowsIE(InfoExtractor):
    IE_DESC = u'The Daily Show / Colbert Report'
    # urls can be abbreviations like :thedailyshow or :colbert
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                      |(https?://)?(www\.)?
                          (?P<showname>thedailyshow|colbertnation)\.com/
                         (full-episodes/(?P<episode>.*)|
                          (?P<clip>
                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))|
                          (?P<interview>
                              extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?)))
                     $"""
    _TEST = {
        u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
        u'file': u'422212.mp4',
        u'md5': u'4e2f5cb088a83cd8cdb7756132f9739d',
        u'info_dict': {
            u"upload_date": u"20121214",
            u"description": u"Kristen Stewart",
            u"uploader": u"thedailyshow",
            u"title": u"thedailyshow-kristen-stewart part 1"
        }
    }

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']

    _video_extensions = {
        '3500': 'mp4',
        '2200': 'mp4',
        '1700': 'mp4',
        '1200': 'mp4',
        '750': 'mp4',
        '400': 'mp4',
    }
    _video_dimensions = {
        '3500': (1280, 720),
        '2200': (960, 540),
        '1700': (768, 432),
        '1200': (640, 360),
        '750': (512, 288),
        '400': (384, 216),
    }

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    @staticmethod
    def _transform_rtmp_url(rtmp_video_url):
        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
        if not m:
            raise ExtractorError(u'Cannot transform RTMP url')
        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
        return base + m.group('finalid')
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Invalid URL: %s' % url)
|
||||
|
||||
if mobj.group('shortname'):
|
||||
if mobj.group('shortname') in ('tds', 'thedailyshow'):
|
||||
url = u'http://www.thedailyshow.com/full-episodes/'
|
||||
else:
|
||||
url = u'http://www.colbertnation.com/full-episodes/'
|
||||
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
|
||||
assert mobj is not None
|
||||
|
||||
if mobj.group('clip'):
|
||||
if mobj.group('showname') == 'thedailyshow':
|
||||
epTitle = mobj.group('tdstitle')
|
||||
else:
|
||||
epTitle = mobj.group('cntitle')
|
||||
dlNewest = False
|
||||
elif mobj.group('interview'):
|
||||
epTitle = mobj.group('interview_title')
|
||||
dlNewest = False
|
||||
else:
|
||||
dlNewest = not mobj.group('episode')
|
||||
if dlNewest:
|
||||
epTitle = mobj.group('showname')
|
||||
else:
|
||||
epTitle = mobj.group('episode')
|
||||
|
||||
self.report_extraction(epTitle)
|
||||
webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
|
||||
if dlNewest:
|
||||
url = htmlHandle.geturl()
|
||||
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Invalid redirected URL: ' + url)
|
||||
if mobj.group('episode') == '':
|
||||
raise ExtractorError(u'Redirected URL is still not specific: ' + url)
|
||||
epTitle = mobj.group('episode')
|
||||
|
||||
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
|
||||
|
||||
if len(mMovieParams) == 0:
|
||||
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix, so extract the alternate
            # reference and then add the URL prefix manually.
|
||||
|
||||
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
|
||||
if len(altMovieParams) == 0:
|
||||
raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
|
||||
else:
|
||||
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
|
||||
|
||||
uri = mMovieParams[0][1]
|
||||
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
|
||||
indexXml = self._download_webpage(indexUrl, epTitle,
|
||||
u'Downloading show index',
|
||||
u'unable to download episode index')
|
||||
|
||||
results = []
|
||||
|
||||
idoc = xml.etree.ElementTree.fromstring(indexXml)
|
||||
itemEls = idoc.findall('.//item')
|
||||
for partNum,itemEl in enumerate(itemEls):
|
||||
mediaId = itemEl.findall('./guid')[0].text
|
||||
shortMediaId = mediaId.split(':')[-1]
|
||||
showId = mediaId.split(':')[-2].replace('.com', '')
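            # Illustration (hypothetical guid): a mediaId such as
            # 'mgid:cms:video:thedailyshow.com:422212' would yield
            # shortMediaId '422212' and showId 'thedailyshow'.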
|
||||
officialTitle = itemEl.findall('./title')[0].text
|
||||
officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)
|
||||
|
||||
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
|
||||
compat_urllib_parse.urlencode({'uri': mediaId}))
|
||||
configXml = self._download_webpage(configUrl, epTitle,
|
||||
u'Downloading configuration for %s' % shortMediaId)
|
||||
|
||||
cdoc = xml.etree.ElementTree.fromstring(configXml)
|
||||
turls = []
|
||||
for rendition in cdoc.findall('.//rendition'):
|
||||
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
|
||||
turls.append(finfo)
|
||||
|
||||
if len(turls) == 0:
|
||||
self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
|
||||
continue
|
||||
|
||||
formats = []
|
||||
for format, rtmp_video_url in turls:
|
||||
w, h = self._video_dimensions.get(format, (None, None))
|
||||
formats.append({
|
||||
'url': self._transform_rtmp_url(rtmp_video_url),
|
||||
'ext': self._video_extensions.get(format, 'mp4'),
|
||||
'format_id': format,
|
||||
'height': h,
|
||||
'width': w,
|
||||
})
|
||||
|
||||
effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
|
||||
info = {
|
||||
'id': shortMediaId,
|
||||
'formats': formats,
|
||||
'uploader': showId,
|
||||
'upload_date': officialDate,
|
||||
'title': effTitle,
|
||||
'thumbnail': None,
|
||||
'description': compat_str(officialTitle),
|
||||
}
|
||||
|
||||
# TODO: Remove when #980 has been merged
|
||||
info.update(info['formats'][-1])
|
||||
|
||||
results.append(info)
|
||||
|
||||
return results
|
||||
440
youtube_dl/extractor/common.py
Normal file
@@ -0,0 +1,440 @@
import base64
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import sys
|
||||
import netrc
|
||||
import xml.etree.ElementTree
|
||||
|
||||
from ..utils import (
|
||||
compat_http_client,
|
||||
compat_urllib_error,
|
||||
compat_str,
|
||||
|
||||
clean_html,
|
||||
compiled_regex_type,
|
||||
ExtractorError,
|
||||
RegexNotFoundError,
|
||||
sanitize_filename,
|
||||
unescapeHTML,
|
||||
)
|
||||
|
||||
|
||||
class InfoExtractor(object):
|
||||
"""Information Extractor class.
|
||||
|
||||
Information extractors are the classes that, given a URL, extract
|
||||
information about the video (or videos) the URL refers to. This
|
||||
information includes the real video URL, the video title, author and
|
||||
others. The information is stored in a dictionary which is then
|
||||
passed to the FileDownloader. The FileDownloader processes this
|
||||
information possibly downloading the video to the file system, among
|
||||
other possible outcomes.
|
||||
|
||||
The dictionaries must include the following fields:
|
||||
|
||||
id: Video identifier.
|
||||
url: Final video URL.
|
||||
title: Video title, unescaped.
|
||||
ext: Video filename extension.
|
||||
|
||||
Instead of url and ext, formats can also specified.
|
||||
|
||||
The following fields are optional:
|
||||
|
||||
format: The video format, defaults to ext (used for --get-format)
|
||||
thumbnails: A list of dictionaries (with the entries "resolution" and
|
||||
"url") for the varying thumbnails
|
||||
thumbnail: Full URL to a video thumbnail image.
|
||||
description: One-line video description.
|
||||
uploader: Full name of the video uploader.
|
||||
upload_date: Video upload date (YYYYMMDD).
|
||||
uploader_id: Nickname or id of the video uploader.
|
||||
location: Physical location of the video.
|
||||
player_url: SWF Player URL (used for rtmpdump).
|
||||
subtitles: The subtitle file contents as a dictionary in the format
|
||||
{language: subtitles}.
|
||||
view_count: How many users have watched the video on the platform.
|
||||
urlhandle: [internal] The urlHandle to be used to download the file,
|
||||
like returned by urllib.request.urlopen
|
||||
age_limit: Age restriction for the video, as an integer (years)
|
||||
formats: A list of dictionaries for each format available, it must
|
||||
be ordered from worst to best quality. Potential fields:
|
||||
* url Mandatory. The URL of the video file
|
||||
* ext Will be calculated from url if missing
|
||||
* format A human-readable description of the format
|
||||
("mp4 container with h264/opus").
|
||||
Calculated from the format_id, width, height.
|
||||
and format_note fields if missing.
|
||||
* format_id A short description of the format
|
||||
("mp4_h264_opus" or "19")
|
||||
* format_note Additional info about the format
|
||||
("3D" or "DASH video")
|
||||
* width Width of the video, if known
|
||||
* height Height of the video, if known
|
||||
* abr Average audio bitrate in KBit/s
|
||||
* acodec Name of the audio codec in use
|
||||
* vbr Average video bitrate in KBit/s
|
||||
* vcodec Name of the video codec in use
|
||||
* filesize The number of bytes, if known in advance
|
||||
webpage_url: The url to the video webpage, if given to youtube-dl it
|
||||
should allow to get the same result again. (It will be set
|
||||
by YoutubeDL if it's missing)
|
||||
|
||||
Unless mentioned otherwise, the fields should be Unicode strings.
|
||||
|
||||
Subclasses of this one should re-define the _real_initialize() and
|
||||
_real_extract() methods and define a _VALID_URL regexp.
|
||||
Probably, they should also be added to the list of extractors.
|
||||
|
||||
_real_extract() must return a *list* of information dictionaries as
|
||||
described above.
|
||||
|
||||
Finally, the _WORKING attribute should be set to False for broken IEs
|
||||
in order to warn the users and skip the tests.
|
||||
"""
|
||||
|
||||
_ready = False
|
||||
_downloader = None
|
||||
_WORKING = True
|
||||
|
||||
def __init__(self, downloader=None):
|
||||
"""Constructor. Receives an optional downloader."""
|
||||
self._ready = False
|
||||
self.set_downloader(downloader)
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
"""Receives a URL and returns True if suitable for this IE."""
|
||||
|
||||
# This does not use has/getattr intentionally - we want to know whether
|
||||
# we have cached the regexp for *this* class, whereas getattr would also
|
||||
# match the superclass
|
||||
if '_VALID_URL_RE' not in cls.__dict__:
|
||||
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
|
||||
return cls._VALID_URL_RE.match(url) is not None
|
||||
|
||||
@classmethod
|
||||
def working(cls):
|
||||
"""Getter method for _WORKING."""
|
||||
return cls._WORKING
|
||||
|
||||
def initialize(self):
|
||||
"""Initializes an instance (authentication, etc)."""
|
||||
if not self._ready:
|
||||
self._real_initialize()
|
||||
self._ready = True
|
||||
|
||||
def extract(self, url):
|
||||
"""Extracts URL information and returns it in list of dicts."""
|
||||
self.initialize()
|
||||
return self._real_extract(url)
|
||||
|
||||
def set_downloader(self, downloader):
|
||||
"""Sets the downloader for this IE."""
|
||||
self._downloader = downloader
|
||||
|
||||
def _real_initialize(self):
|
||||
"""Real initialization process. Redefine in subclasses."""
|
||||
pass
|
||||
|
||||
def _real_extract(self, url):
|
||||
"""Real extraction process. Redefine in subclasses."""
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def ie_key(cls):
|
||||
"""A string for getting the InfoExtractor with get_info_extractor"""
|
||||
return cls.__name__[:-2]
|
||||
|
||||
@property
|
||||
def IE_NAME(self):
|
||||
return type(self).__name__[:-2]
|
||||
|
||||
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
|
||||
""" Returns the response handle """
|
||||
if note is None:
|
||||
self.report_download_webpage(video_id)
|
||||
elif note is not False:
|
||||
self.to_screen(u'%s: %s' % (video_id, note))
|
||||
try:
|
||||
return self._downloader.urlopen(url_or_request)
|
||||
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
||||
if errnote is None:
|
||||
errnote = u'Unable to download webpage'
|
||||
raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err)
|
||||
|
||||
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
|
||||
""" Returns a tuple (page content as string, URL handle) """
|
||||
|
||||
# Strip hashes from the URL (#1038)
|
||||
if isinstance(url_or_request, (compat_str, str)):
|
||||
url_or_request = url_or_request.partition('#')[0]
|
||||
|
||||
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
|
||||
content_type = urlh.headers.get('Content-Type', '')
|
||||
webpage_bytes = urlh.read()
|
||||
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
|
||||
if m:
|
||||
encoding = m.group(1)
|
||||
else:
|
||||
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
|
||||
webpage_bytes[:1024])
|
||||
if m:
|
||||
encoding = m.group(1).decode('ascii')
|
||||
else:
|
||||
encoding = 'utf-8'
|
||||
if self._downloader.params.get('dump_intermediate_pages', False):
|
||||
try:
|
||||
url = url_or_request.get_full_url()
|
||||
except AttributeError:
|
||||
url = url_or_request
|
||||
self.to_screen(u'Dumping request to ' + url)
|
||||
dump = base64.b64encode(webpage_bytes).decode('ascii')
|
||||
self._downloader.to_screen(dump)
|
||||
if self._downloader.params.get('write_pages', False):
|
||||
try:
|
||||
url = url_or_request.get_full_url()
|
||||
except AttributeError:
|
||||
url = url_or_request
|
||||
raw_filename = ('%s_%s.dump' % (video_id, url))
|
||||
filename = sanitize_filename(raw_filename, restricted=True)
|
||||
self.to_screen(u'Saving request to ' + filename)
|
||||
with open(filename, 'wb') as outf:
|
||||
outf.write(webpage_bytes)
|
||||
|
||||
content = webpage_bytes.decode(encoding, 'replace')
|
||||
return (content, urlh)
|
||||
|
||||
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
|
||||
""" Returns the data of the page as a string """
|
||||
return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
|
||||
|
||||
    def _download_xml(self, url_or_request, video_id, note=u'Downloading XML', errnote=u'Unable to download XML'):
|
||||
"""Return the xml as an xml.etree.ElementTree.Element"""
|
||||
xml_string = self._download_webpage(url_or_request, video_id, note, errnote)
|
||||
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
|
||||
|
||||
def to_screen(self, msg):
|
||||
"""Print msg to screen, prefixing it with '[ie_name]'"""
|
||||
self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
|
||||
|
||||
def report_extraction(self, id_or_name):
|
||||
"""Report information extraction."""
|
||||
self.to_screen(u'%s: Extracting information' % id_or_name)
|
||||
|
||||
def report_download_webpage(self, video_id):
|
||||
"""Report webpage download."""
|
||||
self.to_screen(u'%s: Downloading webpage' % video_id)
|
||||
|
||||
def report_age_confirmation(self):
|
||||
"""Report attempt to confirm age."""
|
||||
self.to_screen(u'Confirming age')
|
||||
|
||||
def report_login(self):
|
||||
"""Report attempt to log in."""
|
||||
self.to_screen(u'Logging in')
|
||||
|
||||
#Methods for following #608
|
||||
def url_result(self, url, ie=None, video_id=None):
|
||||
"""Returns a url that points to a page that should be processed"""
|
||||
#TODO: ie should be the class used for getting the info
|
||||
video_info = {'_type': 'url',
|
||||
'url': url,
|
||||
'ie_key': ie}
|
||||
if video_id is not None:
|
||||
video_info['id'] = video_id
|
||||
return video_info
|
||||
def playlist_result(self, entries, playlist_id=None, playlist_title=None):
|
||||
"""Returns a playlist"""
|
||||
video_info = {'_type': 'playlist',
|
||||
'entries': entries}
|
||||
if playlist_id:
|
||||
video_info['id'] = playlist_id
|
||||
if playlist_title:
|
||||
video_info['title'] = playlist_title
|
||||
return video_info
|
||||
|
||||
def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
|
||||
"""
|
||||
Perform a regex search on the given string, using a single or a list of
|
||||
patterns returning the first matching group.
|
||||
In case of failure return a default value or raise a WARNING or a
|
||||
RegexNotFoundError, depending on fatal, specifying the field name.
|
||||
"""
|
||||
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
|
||||
mobj = re.search(pattern, string, flags)
|
||||
else:
|
||||
for p in pattern:
|
||||
mobj = re.search(p, string, flags)
|
||||
if mobj: break
|
||||
|
||||
if sys.stderr.isatty() and os.name != 'nt':
|
||||
_name = u'\033[0;34m%s\033[0m' % name
|
||||
else:
|
||||
_name = name
|
||||
|
||||
if mobj:
|
||||
# return the first matching group
|
||||
return next(g for g in mobj.groups() if g is not None)
|
||||
elif default is not None:
|
||||
return default
|
||||
elif fatal:
|
||||
raise RegexNotFoundError(u'Unable to extract %s' % _name)
|
||||
else:
|
||||
self._downloader.report_warning(u'unable to extract %s; '
|
||||
u'please report this issue on http://yt-dl.org/bug' % _name)
|
||||
return None
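        # Usage illustration (hypothetical values): with pattern=r'id=(\d+)' and
        # string='<a id=42>', the method returns u'42'; when a list of patterns
        # is given, each is tried in order until one matches.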
|
||||
|
||||
def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
|
||||
"""
|
||||
Like _search_regex, but strips HTML tags and unescapes entities.
|
||||
"""
|
||||
res = self._search_regex(pattern, string, name, default, fatal, flags)
|
||||
if res:
|
||||
return clean_html(res).strip()
|
||||
else:
|
||||
return res
|
||||
|
||||
def _get_login_info(self):
|
||||
"""
|
||||
        Get the login info as (username, password)
|
||||
It will look in the netrc file using the _NETRC_MACHINE value
|
||||
If there's no info available, return (None, None)
|
||||
"""
|
||||
if self._downloader is None:
|
||||
return (None, None)
|
||||
|
||||
username = None
|
||||
password = None
|
||||
downloader_params = self._downloader.params
|
||||
|
||||
# Attempt to use provided username and password or .netrc data
|
||||
if downloader_params.get('username', None) is not None:
|
||||
username = downloader_params['username']
|
||||
password = downloader_params['password']
|
||||
elif downloader_params.get('usenetrc', False):
|
||||
try:
|
||||
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
|
||||
if info is not None:
|
||||
username = info[0]
|
||||
password = info[2]
|
||||
else:
|
||||
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
|
||||
except (IOError, netrc.NetrcParseError) as err:
|
||||
self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
|
||||
|
||||
return (username, password)
|
||||
|
||||
# Helper functions for extracting OpenGraph info
|
||||
@staticmethod
|
||||
def _og_regexes(prop):
|
||||
content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')'
|
||||
property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop)
|
||||
template = r'<meta[^>]+?%s[^>]+?%s'
|
||||
return [
|
||||
template % (property_re, content_re),
|
||||
template % (content_re, property_re),
|
||||
]
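        # The two templates above cover both attribute orders, e.g. (illustrative markup):
        #   <meta property="og:title" content="Some title" />
        #   <meta content="Some title" property="og:title" />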
|
||||
|
||||
def _og_search_property(self, prop, html, name=None, **kargs):
|
||||
if name is None:
|
||||
name = 'OpenGraph %s' % prop
|
||||
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
|
||||
if escaped is None:
|
||||
return None
|
||||
return unescapeHTML(escaped)
|
||||
|
||||
def _og_search_thumbnail(self, html, **kargs):
|
||||
return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)
|
||||
|
||||
def _og_search_description(self, html, **kargs):
|
||||
return self._og_search_property('description', html, fatal=False, **kargs)
|
||||
|
||||
def _og_search_title(self, html, **kargs):
|
||||
return self._og_search_property('title', html, **kargs)
|
||||
|
||||
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
|
||||
regexes = self._og_regexes('video')
|
||||
if secure: regexes = self._og_regexes('video:secure_url') + regexes
|
||||
return self._html_search_regex(regexes, html, name, **kargs)
|
||||
|
||||
def _html_search_meta(self, name, html, display_name=None):
|
||||
if display_name is None:
|
||||
display_name = name
|
||||
return self._html_search_regex(
|
||||
r'''(?ix)<meta(?=[^>]+(?:name|property)=["\']%s["\'])
|
||||
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
|
||||
html, display_name, fatal=False)
|
||||
|
||||
def _dc_search_uploader(self, html):
|
||||
return self._html_search_meta('dc.creator', html, 'uploader')
|
||||
|
||||
def _rta_search(self, html):
|
||||
# See http://www.rtalabel.org/index.php?content=howtofaq#single
|
||||
if re.search(r'(?ix)<meta\s+name="rating"\s+'
|
||||
r' content="RTA-5042-1996-1400-1577-RTA"',
|
||||
html):
|
||||
return 18
|
||||
return 0
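        # The regex above matches the standard RTA label tag verbatim, i.e.
        #   <meta name="rating" content="RTA-5042-1996-1400-1577-RTA" />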
|
||||
|
||||
def _media_rating_search(self, html):
|
||||
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
|
||||
rating = self._html_search_meta('rating', html)
|
||||
|
||||
if not rating:
|
||||
return None
|
||||
|
||||
RATING_TABLE = {
|
||||
'safe for kids': 0,
|
||||
'general': 8,
|
||||
'14 years': 14,
|
||||
'mature': 17,
|
||||
'restricted': 19,
|
||||
}
|
||||
return RATING_TABLE.get(rating.lower(), None)
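

# A minimal sketch (not part of the original file) of how a site extractor
# could implement the contract documented in InfoExtractor's docstring. The
# URL pattern, page markup and regex below are hypothetical.
class _ExampleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?example\.com/videos/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)
        # _real_extract must return a *list* of info dictionaries
        return [{
            'id': video_id,
            'url': self._search_regex(
                r'file:\s*"([^"]+)"', webpage, u'video url'),
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
        }]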
|
||||
|
||||
|
||||
|
||||
class SearchInfoExtractor(InfoExtractor):
|
||||
"""
|
||||
Base class for paged search queries extractors.
|
||||
They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
|
||||
Instances should define _SEARCH_KEY and _MAX_RESULTS.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def _make_valid_url(cls):
|
||||
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
return re.match(cls._make_valid_url(), url) is not None
|
||||
|
||||
def _real_extract(self, query):
|
||||
mobj = re.match(self._make_valid_url(), query)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Invalid search query "%s"' % query)
|
||||
|
||||
prefix = mobj.group('prefix')
|
||||
query = mobj.group('query')
|
||||
if prefix == '':
|
||||
return self._get_n_results(query, 1)
|
||||
elif prefix == 'all':
|
||||
return self._get_n_results(query, self._MAX_RESULTS)
|
||||
else:
|
||||
n = int(prefix)
|
||||
if n <= 0:
|
||||
raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
|
||||
elif n > self._MAX_RESULTS:
|
||||
self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
|
||||
n = self._MAX_RESULTS
|
||||
return self._get_n_results(query, n)
|
||||
|
||||
def _get_n_results(self, query, n):
|
||||
"""Get a specified number of results for a query"""
|
||||
raise NotImplementedError("This method must be implemented by subclasses")
|
||||
|
||||
@property
|
||||
def SEARCH_KEY(self):
|
||||
return self._SEARCH_KEY
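

# A minimal sketch (hypothetical, not part of the original file) of a search
# extractor built on the base class above: it accepts queries such as
# "examplesearch5:kittens" and only has to implement _get_n_results.
class _ExampleSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'examplesearch'
    _MAX_RESULTS = 50

    def _get_n_results(self, query, n):
        # A real implementation would query the site; this only shows the
        # expected return shape: a playlist of url results.
        entries = [self.url_result('http://www.example.com/videos/%d' % i)
                   for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)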
|
||||
106
youtube_dl/extractor/condenast.py
Normal file
@@ -0,0 +1,106 @@
# coding: utf-8
|
||||
|
||||
import re
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
compat_urllib_parse,
|
||||
orderedSet,
|
||||
compat_urllib_parse_urlparse,
|
||||
compat_urlparse,
|
||||
)
|
||||
|
||||
|
||||
class CondeNastIE(InfoExtractor):
|
||||
"""
|
||||
Condé Nast is a media group, some of its sites use a custom HTML5 player
|
||||
that works the same in all of them.
|
||||
"""
|
||||
|
||||
# The keys are the supported sites and the values are the name to be shown
|
||||
# to the user and in the extractor description.
|
||||
_SITES = {'wired': u'WIRED',
|
||||
'gq': u'GQ',
|
||||
'vogue': u'Vogue',
|
||||
'glamour': u'Glamour',
|
||||
'wmagazine': u'W Magazine',
|
||||
'vanityfair': u'Vanity Fair',
|
||||
}
|
||||
|
||||
    _VALID_URL = r'http://(video|www)\.(?P<site>%s)\.com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
|
||||
IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
|
||||
u'file': u'5171b343c2b4c00dd0c1ccb3.mp4',
|
||||
u'md5': u'1921f713ed48aabd715691f774c451f7',
|
||||
u'info_dict': {
|
||||
u'title': u'3D Printed Speakers Lit With LED',
|
||||
u'description': u'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
|
||||
}
|
||||
}
|
||||
|
||||
def _extract_series(self, url, webpage):
|
||||
title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
|
||||
webpage, u'series title', flags=re.DOTALL)
|
||||
url_object = compat_urllib_parse_urlparse(url)
|
||||
base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
|
||||
m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
|
||||
webpage, flags=re.DOTALL)
|
||||
paths = orderedSet(m.group(1) for m in m_paths)
|
||||
build_url = lambda path: compat_urlparse.urljoin(base_url, path)
|
||||
entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
|
||||
return self.playlist_result(entries, playlist_title=title)
|
||||
|
||||
def _extract_video(self, webpage):
|
||||
description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
|
||||
r'<div class="video-post-content">(.+?)</div>',
|
||||
],
|
||||
webpage, u'description',
|
||||
fatal=False, flags=re.DOTALL)
|
||||
params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
|
||||
u'player params', flags=re.DOTALL)
|
||||
video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id')
|
||||
player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id')
|
||||
target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target')
|
||||
data = compat_urllib_parse.urlencode({'videoId': video_id,
|
||||
'playerId': player_id,
|
||||
'target': target,
|
||||
})
|
||||
base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
|
||||
webpage, u'base info url',
|
||||
default='http://player.cnevids.com/player/loader.js?')
|
||||
info_url = base_info_url + data
|
||||
info_page = self._download_webpage(info_url, video_id,
|
||||
u'Downloading video info')
|
||||
video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info')
|
||||
video_info = json.loads(video_info)
|
||||
|
||||
def _formats_sort_key(f):
|
||||
type_ord = 1 if f['type'] == 'video/mp4' else 0
|
||||
quality_ord = 1 if f['quality'] == 'high' else 0
|
||||
return (quality_ord, type_ord)
|
||||
best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1]
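        # sorted() is ascending, so taking the last element prefers 'high'
        # quality first and, within the same quality, 'video/mp4' sources.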
|
||||
|
||||
return {'id': video_id,
|
||||
'url': best_format['src'],
|
||||
'ext': best_format['type'].split('/')[-1],
|
||||
'title': video_info['title'],
|
||||
'thumbnail': video_info['poster_frame'],
|
||||
'description': description,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
site = mobj.group('site')
|
||||
url_type = mobj.group('type')
|
||||
id = mobj.group('id')
|
||||
|
||||
self.to_screen(u'Extracting from %s with the Condé Nast extractor' % self._SITES[site])
|
||||
webpage = self._download_webpage(url, id)
|
||||
|
||||
if url_type == 'series':
|
||||
return self._extract_series(url, webpage)
|
||||
else:
|
||||
return self._extract_video(webpage)
40
youtube_dl/extractor/criterion.py
Normal file
@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-

import re

from .common import InfoExtractor
from ..utils import determine_ext

class CriterionIE(InfoExtractor):
    _VALID_URL = r'https?://www\.criterion\.com/films/(\d*)-.+'
    _TEST = {
        u'url': u'http://www.criterion.com/films/184-le-samourai',
        u'file': u'184.mp4',
        u'md5': u'bc51beba55685509883a9a7830919ec3',
        u'info_dict': {
            u"title": u"Le Samouraï",
            u"description": u'md5:a2b4b116326558149bef81f76dcbb93f',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)

        final_url = self._search_regex(r'so.addVariable\("videoURL", "(.+?)"\)\;',
                                       webpage, 'video url')
        title = self._html_search_regex(r'<meta content="(.+?)" property="og:title" />',
                                        webpage, 'video title')
        description = self._html_search_regex(r'<meta name="description" content="(.+?)" />',
                                              webpage, 'video description')
        thumbnail = self._search_regex(r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
                                       webpage, 'thumbnail url')

        return {'id': video_id,
                'url': final_url,
                'title': title,
                'ext': determine_ext(final_url),
                'description': description,
                'thumbnail': thumbnail,
                }
51
youtube_dl/extractor/cspan.py
Normal file
@@ -0,0 +1,51 @@
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
compat_urllib_parse,
|
||||
)
|
||||
|
||||
class CSpanIE(InfoExtractor):
|
||||
_VALID_URL = r'http://www.c-spanvideo.org/program/(.*)'
|
||||
_TEST = {
|
||||
u'url': u'http://www.c-spanvideo.org/program/HolderonV',
|
||||
u'file': u'315139.flv',
|
||||
u'md5': u'74a623266956f69e4df0068ab6c80fe4',
|
||||
u'info_dict': {
|
||||
u"title": u"Attorney General Eric Holder on Voting Rights Act Decision"
|
||||
},
|
||||
u'skip': u'Requires rtmpdump'
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
prog_name = mobj.group(1)
|
||||
webpage = self._download_webpage(url, prog_name)
|
||||
video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')
|
||||
data = compat_urllib_parse.urlencode({'programid': video_id,
|
||||
'dynamic':'1'})
|
||||
info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data
|
||||
video_info = self._download_webpage(info_url, video_id, u'Downloading video info')
|
||||
|
||||
self.report_extraction(video_id)
|
||||
|
||||
title = self._html_search_regex(r'<string name="title">(.*?)</string>',
|
||||
video_info, 'title')
|
||||
description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"',
|
||||
webpage, 'description',
|
||||
flags=re.MULTILINE|re.DOTALL)
|
||||
|
||||
url = self._search_regex(r'<string name="URL">(.*?)</string>',
|
||||
video_info, 'video url')
|
||||
url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443')
|
||||
path = self._search_regex(r'<string name="path">(.*?)</string>',
|
||||
video_info, 'rtmp play path')
|
||||
|
||||
return {'id': video_id,
|
||||
'title': title,
|
||||
'ext': 'flv',
|
||||
'url': url,
|
||||
'play_path': path,
|
||||
'description': description,
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
}
|
||||
22
youtube_dl/extractor/d8.py
Normal file
@@ -0,0 +1,22 @@
# encoding: utf-8
from .canalplus import CanalplusIE


class D8IE(CanalplusIE):
    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
    IE_NAME = u'd8.tv'

    _TEST = {
        u'url': u'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
        u'file': u'966289.flv',
        u'info_dict': {
            u'title': u'Campagne intime - Documentaire exceptionnel',
            u'description': u'md5:d2643b799fb190846ae09c61e59a859f',
            u'upload_date': u'20131108',
        },
        u'params': {
            # rtmp
            u'skip_download': True,
        },
    }
228
youtube_dl/extractor/dailymotion.py
Normal file
@@ -0,0 +1,228 @@
import re
|
||||
import json
|
||||
import itertools
|
||||
|
||||
from .common import InfoExtractor
|
||||
from .subtitles import SubtitlesInfoExtractor
|
||||
|
||||
from ..utils import (
|
||||
compat_urllib_request,
|
||||
compat_str,
|
||||
get_element_by_attribute,
|
||||
get_element_by_id,
|
||||
orderedSet,
|
||||
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
class DailymotionBaseInfoExtractor(InfoExtractor):
|
||||
@staticmethod
|
||||
def _build_request(url):
|
||||
"""Build a request with the family filter disabled"""
|
||||
request = compat_urllib_request.Request(url)
|
||||
request.add_header('Cookie', 'family_filter=off')
|
||||
request.add_header('Cookie', 'ff=off')
|
||||
return request
|
||||
|
||||
class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
|
||||
"""Information Extractor for Dailymotion"""
|
||||
|
||||
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
|
||||
IE_NAME = u'dailymotion'
|
||||
|
||||
_FORMATS = [
|
||||
(u'stream_h264_ld_url', u'ld'),
|
||||
(u'stream_h264_url', u'standard'),
|
||||
(u'stream_h264_hq_url', u'hq'),
|
||||
(u'stream_h264_hd_url', u'hd'),
|
||||
        (u'stream_h264_hd1080_url', u'hd1080'),
|
||||
]
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
|
||||
u'file': u'x33vw9.mp4',
|
||||
u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
|
||||
u'info_dict': {
|
||||
u"uploader": u"Amphora Alex and Van .",
|
||||
u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
|
||||
}
|
||||
},
|
||||
# Vevo video
|
||||
{
|
||||
u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
|
||||
u'file': u'USUV71301934.mp4',
|
||||
u'info_dict': {
|
||||
u'title': u'Roar (Official)',
|
||||
u'uploader': u'Katy Perry',
|
||||
u'upload_date': u'20130905',
|
||||
},
|
||||
u'params': {
|
||||
u'skip_download': True,
|
||||
},
|
||||
u'skip': u'VEVO is only available in some countries',
|
||||
},
|
||||
# age-restricted video
|
||||
{
|
||||
u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
|
||||
u'file': u'xyh2zz.mp4',
|
||||
u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
|
||||
u'info_dict': {
|
||||
u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
|
||||
u'uploader': 'HotWaves1012',
|
||||
u'age_limit': 18,
|
||||
}
|
||||
|
||||
}
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
# Extract id and simplified title from URL
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
|
||||
video_id = mobj.group(1).split('_')[0].split('?')[0]
|
||||
|
||||
url = 'http://www.dailymotion.com/video/%s' % video_id
|
||||
|
||||
# Retrieve video webpage to extract further information
|
||||
request = self._build_request(url)
|
||||
webpage = self._download_webpage(request, video_id)
|
||||
|
||||
# Extract URL, uploader and title from webpage
|
||||
self.report_extraction(video_id)
|
||||
|
||||
# It may just embed a vevo video:
|
||||
m_vevo = re.search(
|
||||
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
|
||||
webpage)
|
||||
if m_vevo is not None:
|
||||
vevo_id = m_vevo.group('id')
|
||||
self.to_screen(u'Vevo video detected: %s' % vevo_id)
|
||||
return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')
|
||||
|
||||
video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
|
||||
# Looking for official user
|
||||
r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
|
||||
webpage, 'video uploader', fatal=False)
|
||||
age_limit = self._rta_search(webpage)
|
||||
|
||||
video_upload_date = None
|
||||
mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
|
||||
if mobj is not None:
|
||||
video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
|
||||
|
||||
embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
|
||||
embed_page = self._download_webpage(embed_url, video_id,
|
||||
u'Downloading embed page')
|
||||
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
|
||||
'video info', flags=re.MULTILINE)
|
||||
info = json.loads(info)
|
||||
if info.get('error') is not None:
|
||||
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
|
||||
raise ExtractorError(msg, expected=True)
|
||||
|
||||
formats = []
|
||||
for (key, format_id) in self._FORMATS:
|
||||
video_url = info.get(key)
|
||||
if video_url is not None:
|
||||
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
|
||||
if m_size is not None:
|
||||
width, height = m_size.group(1), m_size.group(2)
|
||||
else:
|
||||
width, height = None, None
|
||||
formats.append({
|
||||
'url': video_url,
|
||||
'ext': 'mp4',
|
||||
'format_id': format_id,
|
||||
'width': width,
|
||||
'height': height,
|
||||
})
|
||||
if not formats:
|
||||
raise ExtractorError(u'Unable to extract video URL')
|
||||
|
||||
# subtitles
|
||||
video_subtitles = self.extract_subtitles(video_id, webpage)
|
||||
if self._downloader.params.get('listsubtitles', False):
|
||||
self._list_available_subtitles(video_id, webpage)
|
||||
return
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'uploader': video_uploader,
|
||||
'upload_date': video_upload_date,
|
||||
'title': self._og_search_title(webpage),
|
||||
'subtitles': video_subtitles,
|
||||
'thumbnail': info['thumbnail_url'],
|
||||
'age_limit': age_limit,
|
||||
}
|
||||
|
||||
def _get_available_subtitles(self, video_id, webpage):
|
||||
try:
|
||||
sub_list = self._download_webpage(
|
||||
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
|
||||
video_id, note=False)
|
||||
except ExtractorError as err:
|
||||
self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
|
||||
return {}
|
||||
info = json.loads(sub_list)
|
||||
if (info['total'] > 0):
|
||||
sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
|
||||
return sub_lang_list
|
||||
self._downloader.report_warning(u'video doesn\'t have subtitles')
|
||||
return {}
|
||||
|
||||
|
||||
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
|
||||
IE_NAME = u'dailymotion:playlist'
|
||||
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
|
||||
_MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
|
||||
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
|
||||
|
||||
def _extract_entries(self, id):
|
||||
video_ids = []
|
||||
for pagenum in itertools.count(1):
|
||||
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
|
||||
webpage = self._download_webpage(request,
|
||||
id, u'Downloading page %s' % pagenum)
|
||||
|
||||
playlist_el = get_element_by_attribute(u'class', u'row video_list', webpage)
|
||||
video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el))
|
||||
|
||||
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
|
||||
break
|
||||
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
|
||||
for video_id in orderedSet(video_ids)]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
playlist_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
|
||||
return {'_type': 'playlist',
|
||||
'id': playlist_id,
|
||||
'title': get_element_by_id(u'playlist_name', webpage),
|
||||
'entries': self._extract_entries(playlist_id),
|
||||
}
|
||||
|
||||
|
||||
class DailymotionUserIE(DailymotionPlaylistIE):
|
||||
IE_NAME = u'dailymotion:user'
|
||||
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
|
||||
_MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
|
||||
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
user = mobj.group('user')
|
||||
webpage = self._download_webpage(url, user)
|
||||
full_user = self._html_search_regex(
|
||||
r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
|
||||
webpage, u'user', flags=re.DOTALL)
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': user,
|
||||
'title': full_user,
|
||||
'entries': self._extract_entries(user),
|
||||
}
|
||||
74
youtube_dl/extractor/daum.py
Normal file
@@ -0,0 +1,74 @@
# encoding: utf-8
|
||||
import re
|
||||
import xml.etree.ElementTree
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
compat_urllib_parse,
|
||||
determine_ext,
|
||||
)
|
||||
|
||||
|
||||
class DaumIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
|
||||
IE_NAME = u'daum.net'
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
|
||||
u'file': u'52554690.mp4',
|
||||
u'info_dict': {
|
||||
u'title': u'DOTA 2GETHER 시즌2 6회 - 2부',
|
||||
u'description': u'DOTA 2GETHER 시즌2 6회 - 2부',
|
||||
u'upload_date': u'20130831',
|
||||
u'duration': 3868,
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group(1)
|
||||
canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
|
||||
webpage = self._download_webpage(canonical_url, video_id)
|
||||
full_id = self._search_regex(r'<link rel="video_src" href=".+?vid=(.+?)"',
|
||||
webpage, u'full id')
|
||||
query = compat_urllib_parse.urlencode({'vid': full_id})
|
||||
info_xml = self._download_webpage(
|
||||
'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
|
||||
u'Downloading video info')
|
||||
urls_xml = self._download_webpage(
|
||||
'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
|
||||
video_id, u'Downloading video formats info')
|
||||
info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8'))
|
||||
urls = xml.etree.ElementTree.fromstring(urls_xml.encode('utf-8'))
|
||||
|
||||
self.to_screen(u'%s: Getting video urls' % video_id)
|
||||
formats = []
|
||||
for format_el in urls.findall('result/output_list/output_list'):
|
||||
profile = format_el.attrib['profile']
|
||||
format_query = compat_urllib_parse.urlencode({
|
||||
'vid': full_id,
|
||||
'profile': profile,
|
||||
})
|
||||
url_xml = self._download_webpage(
|
||||
'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
|
||||
video_id, note=False)
|
||||
url_doc = xml.etree.ElementTree.fromstring(url_xml.encode('utf-8'))
|
||||
format_url = url_doc.find('result/url').text
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
'ext': determine_ext(format_url),
|
||||
'format_id': profile,
|
||||
})
|
||||
|
||||
info = {
|
||||
'id': video_id,
|
||||
'title': info.find('TITLE').text,
|
||||
'formats': formats,
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'description': info.find('CONTENTS').text,
|
||||
'duration': int(info.find('DURATION').text),
|
||||
'upload_date': info.find('REGDTTM').text[:8],
|
||||
}
|
||||
# TODO: Remove when #980 has been merged
|
||||
info.update(formats[-1])
|
||||
return info
|
||||
39
youtube_dl/extractor/defense.py
Normal file
@@ -0,0 +1,39 @@
import re
import json

from .common import InfoExtractor


class DefenseGouvFrIE(InfoExtractor):
    _IE_NAME = 'defense.gouv.fr'
    _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
                  r'ligthboxvideo/base-de-medias/webtv/(.*)')

    _TEST = {
        u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/'
                 u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'),
        u'file': u'11213.mp4',
        u'md5': u'75bba6124da7e63d2d60b5244ec9430c',
        "info_dict": {
            "title": "attaque-chimique-syrienne-du-21-aout-2013-1"
        }
    }

    def _real_extract(self, url):
        title = re.match(self._VALID_URL, url).group(1)
        webpage = self._download_webpage(url, title)
        video_id = self._search_regex(
            r"flashvars.pvg_id=\"(\d+)\";",
            webpage, 'ID')

        json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
                    + video_id)
        info = self._download_webpage(json_url, title,
                                      'Downloading JSON config')
        video_url = json.loads(info)['renditions'][0]['url']

        return {'id': video_id,
                'ext': 'mp4',
                'url': video_url,
                'title': title,
                }
60
youtube_dl/extractor/depositfiles.py
Normal file
@@ -0,0 +1,60 @@
import re
|
||||
import os
|
||||
import socket
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
compat_http_client,
|
||||
compat_str,
|
||||
compat_urllib_error,
|
||||
compat_urllib_parse,
|
||||
compat_urllib_request,
|
||||
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
class DepositFilesIE(InfoExtractor):
|
||||
"""Information extractor for depositfiles.com"""
|
||||
|
||||
_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
|
||||
|
||||
def _real_extract(self, url):
|
||||
file_id = url.split('/')[-1]
|
||||
# Rebuild url in english locale
|
||||
url = 'http://depositfiles.com/en/files/' + file_id
|
||||
|
||||
# Retrieve file webpage with 'Free download' button pressed
|
||||
free_download_indication = {'gateway_result' : '1'}
|
||||
request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
|
||||
try:
|
||||
self.report_download_webpage(file_id)
|
||||
webpage = compat_urllib_request.urlopen(request).read()
|
||||
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
||||
raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))
|
||||
|
||||
# Search for the real file URL
|
||||
mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
|
||||
if (mobj is None) or (mobj.group(1) is None):
|
||||
# Try to figure out reason of the error.
|
||||
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
|
||||
if (mobj is not None) and (mobj.group(1) is not None):
|
||||
restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
|
||||
raise ExtractorError(u'%s' % restriction_message)
|
||||
else:
|
||||
raise ExtractorError(u'Unable to extract download URL from: %s' % url)
|
||||
|
||||
file_url = mobj.group(1)
|
||||
file_extension = os.path.splitext(file_url)[1][1:]
|
||||
|
||||
# Search for file title
|
||||
file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')
|
||||
|
||||
return [{
|
||||
'id': file_id.decode('utf-8'),
|
||||
'url': file_url.decode('utf-8'),
|
||||
'uploader': None,
|
||||
'upload_date': None,
|
||||
'title': file_title,
|
||||
'ext': file_extension.decode('utf-8'),
|
||||
}]
|
||||
41
youtube_dl/extractor/dotsub.py
Normal file
@@ -0,0 +1,41 @@
import re
|
||||
import json
|
||||
import time
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class DotsubIE(InfoExtractor):
|
||||
_VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)'
|
||||
_TEST = {
|
||||
u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
|
||||
u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv',
|
||||
u'md5': u'0914d4d69605090f623b7ac329fea66e',
|
||||
u'info_dict': {
|
||||
u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary",
|
||||
u"uploader": u"4v4l0n42",
|
||||
u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
|
||||
u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
|
||||
u'upload_date': u'20101213',
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group(1)
|
||||
info_url = "https://dotsub.com/api/media/%s/metadata" %(video_id)
|
||||
webpage = self._download_webpage(info_url, video_id)
|
||||
info = json.loads(webpage)
|
||||
        date = time.gmtime(info['dateCreated']/1000) # The timestamp is in milliseconds
|
||||
|
||||
return [{
|
||||
'id': video_id,
|
||||
'url': info['mediaURI'],
|
||||
'ext': 'flv',
|
||||
'title': info['title'],
|
||||
'thumbnail': info['screenshotURI'],
|
||||
'description': info['description'],
|
||||
'uploader': info['user'],
|
||||
'view_count': info['numberOfViews'],
|
||||
'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
|
||||
}]
|
||||
85
youtube_dl/extractor/dreisat.py
Normal file
@@ -0,0 +1,85 @@
# coding: utf-8
|
||||
|
||||
import re
|
||||
import xml.etree.ElementTree
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
unified_strdate,
|
||||
)
|
||||
|
||||
|
||||
class DreiSatIE(InfoExtractor):
|
||||
IE_NAME = '3sat'
|
||||
_VALID_URL = r'(?:http://)?(?:www\.)?3sat.de/mediathek/index.php\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
|
||||
_TEST = {
|
||||
u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983",
|
||||
u'file': u'36983.webm',
|
||||
u'md5': u'57c97d0469d71cf874f6815aa2b7c944',
|
||||
u'info_dict': {
|
||||
u"title": u"Kaffeeland Schweiz",
|
||||
u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...",
|
||||
u"uploader": u"3sat",
|
||||
u"upload_date": u"20130622"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
|
||||
details_xml = self._download_webpage(details_url, video_id, note=u'Downloading video details')
|
||||
details_doc = xml.etree.ElementTree.fromstring(details_xml.encode('utf-8'))
|
||||
|
||||
thumbnail_els = details_doc.findall('.//teaserimage')
|
||||
thumbnails = [{
|
||||
'width': te.attrib['key'].partition('x')[0],
|
||||
'height': te.attrib['key'].partition('x')[2],
|
||||
'url': te.text,
|
||||
} for te in thumbnail_els]
|
||||
|
||||
information_el = details_doc.find('.//information')
|
||||
video_title = information_el.find('./title').text
|
||||
video_description = information_el.find('./detail').text
|
||||
|
||||
details_el = details_doc.find('.//details')
|
||||
video_uploader = details_el.find('./channel').text
|
||||
upload_date = unified_strdate(details_el.find('./airtime').text)
|
||||
|
||||
format_els = details_doc.findall('.//formitaet')
|
||||
formats = [{
|
||||
'format_id': fe.attrib['basetype'],
|
||||
'width': int(fe.find('./width').text),
|
||||
'height': int(fe.find('./height').text),
|
||||
'url': fe.find('./url').text,
|
||||
'ext': determine_ext(fe.find('./url').text),
|
||||
'filesize': int(fe.find('./filesize').text),
|
||||
'video_bitrate': int(fe.find('./videoBitrate').text),
|
||||
'3sat_qualityname': fe.find('./quality').text,
|
||||
} for fe in format_els
|
||||
if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
|
||||
|
||||
def _sortkey(format):
|
||||
qidx = ['low', 'med', 'high', 'veryhigh'].index(format['3sat_qualityname'])
|
||||
prefer_http = 1 if 'rtmp' in format['url'] else 0
|
||||
return (qidx, prefer_http, format['video_bitrate'])
|
||||
formats.sort(key=_sortkey)
|
||||
|
||||
info = {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'title': video_title,
|
||||
'formats': formats,
|
||||
'description': video_description,
|
||||
'thumbnails': thumbnails,
|
||||
'thumbnail': thumbnails[-1]['url'],
|
||||
'uploader': video_uploader,
|
||||
'upload_date': upload_date,
|
||||
}
|
||||
|
||||
# TODO: Remove when #980 has been merged
|
||||
info.update(formats[-1])
|
||||
|
||||
return info
|
||||
37
youtube_dl/extractor/ebaumsworld.py
Normal file
@@ -0,0 +1,37 @@
import re
|
||||
import xml.etree.ElementTree
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import determine_ext
|
||||
|
||||
|
||||
class EbaumsWorldIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)'
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://www.ebaumsworld.com/video/watch/83367677/',
|
||||
u'file': u'83367677.mp4',
|
||||
u'info_dict': {
|
||||
u'title': u'A Giant Python Opens The Door',
|
||||
u'description': u'This is how nightmares start...',
|
||||
u'uploader': u'jihadpizza',
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
config_xml = self._download_webpage(
|
||||
'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id)
|
||||
config = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
|
||||
video_url = config.find('file').text
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': config.find('title').text,
|
||||
'url': video_url,
|
||||
'ext': determine_ext(video_url),
|
||||
'description': config.find('description').text,
|
||||
'thumbnail': config.find('image').text,
|
||||
'uploader': config.find('username').text,
|
||||
}
|
||||
46
youtube_dl/extractor/ehow.py
Normal file
@@ -0,0 +1,46 @@
import re
|
||||
|
||||
from ..utils import (
|
||||
compat_urllib_parse,
|
||||
determine_ext
|
||||
)
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class EHowIE(InfoExtractor):
|
||||
IE_NAME = u'eHow'
|
||||
_VALID_URL = r'(?:https?://)?(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
|
||||
_TEST = {
|
||||
u'url': u'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
|
||||
u'file': u'12245069.flv',
|
||||
u'md5': u'9809b4e3f115ae2088440bcb4efbf371',
|
||||
u'info_dict': {
|
||||
u"title": u"Hardwood Flooring Basics",
|
||||
u"description": u"Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...",
|
||||
u"uploader": u"Erick Nathan"
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
|
||||
webpage, u'video URL')
|
||||
final_url = compat_urllib_parse.unquote(video_url)
|
||||
uploader = self._search_regex(r'<meta name="uploader" content="(.+?)" />',
|
||||
webpage, u'uploader')
|
||||
title = self._og_search_title(webpage).replace(' | eHow', '')
|
||||
ext = determine_ext(final_url)
|
||||
|
||||
return {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'url': final_url,
|
||||
'ext': ext,
|
||||
'title': title,
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'description': self._og_search_description(webpage),
|
||||
'uploader': uploader,
|
||||
}
|
||||
|
||||
119
youtube_dl/extractor/eighttracks.py
Normal file
@@ -0,0 +1,119 @@
import json
|
||||
import random
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
class EightTracksIE(InfoExtractor):
|
||||
IE_NAME = '8tracks'
|
||||
_VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
|
||||
_TEST = {
|
||||
u"name": u"EightTracks",
|
||||
u"url": u"http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
|
||||
u"playlist": [
|
||||
{
|
||||
u"file": u"11885610.m4a",
|
||||
u"md5": u"96ce57f24389fc8734ce47f4c1abcc55",
|
||||
u"info_dict": {
|
||||
u"title": u"youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885608.m4a",
|
||||
u"md5": u"4ab26f05c1f7291ea460a3920be8021f",
|
||||
u"info_dict": {
|
||||
u"title": u"youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885679.m4a",
|
||||
u"md5": u"d30b5b5f74217410f4689605c35d1fd7",
|
||||
u"info_dict": {
|
||||
u"title": u"youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885680.m4a",
|
||||
u"md5": u"4eb0a669317cd725f6bbd336a29f923a",
|
||||
u"info_dict": {
|
||||
u"title": u"youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885682.m4a",
|
||||
u"md5": u"1893e872e263a2705558d1d319ad19e8",
|
||||
u"info_dict": {
|
||||
u"title": u"PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885683.m4a",
|
||||
u"md5": u"b673c46f47a216ab1741ae8836af5899",
|
||||
u"info_dict": {
|
||||
u"title": u"PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885684.m4a",
|
||||
u"md5": u"1d74534e95df54986da7f5abf7d842b7",
|
||||
u"info_dict": {
|
||||
u"title": u"phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
},
|
||||
{
|
||||
u"file": u"11885685.m4a",
|
||||
u"md5": u"f081f47af8f6ae782ed131d38b9cd1c0",
|
||||
u"info_dict": {
|
||||
u"title": u"phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
|
||||
u"uploader_id": u"ytdl"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Invalid URL: %s' % url)
|
||||
playlist_id = mobj.group('id')
|
||||
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
|
||||
json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
|
||||
data = json.loads(json_like)
|
||||
|
||||
session = str(random.randint(0, 1000000000))
|
||||
mix_id = data['id']
|
||||
track_count = data['tracks_count']
|
||||
first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
|
||||
next_url = first_url
|
||||
res = []
|
||||
for i in range(track_count):
|
||||
api_json = self._download_webpage(next_url, playlist_id,
|
||||
note=u'Downloading song information %s/%s' % (str(i+1), track_count),
|
||||
errnote=u'Failed to download song information')
|
||||
api_data = json.loads(api_json)
|
||||
track_data = api_data[u'set']['track']
|
||||
info = {
|
||||
'id': track_data['id'],
|
||||
'url': track_data['track_file_stream_url'],
|
||||
'title': track_data['performer'] + u' - ' + track_data['name'],
|
||||
'raw_title': track_data['name'],
|
||||
'uploader_id': data['user']['login'],
|
||||
'ext': 'm4a',
|
||||
}
|
||||
res.append(info)
|
||||
next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
|
||||
return res
|
||||
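The mix is paged one track at a time through the sets/<session>/play and sets/<session>/next endpoints used above. As a quick sanity check of the URL pattern (a minimal sketch; the URL is the one from the _TEST block):

import re

_VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
m = re.match(_VALID_URL, 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a')
print(m.group('user'), m.group('id'))  # -> ytdl youtube-dl-test-tracks-a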
youtube_dl/extractor/eitb.py (new file)
@@ -0,0 +1,37 @@
# encoding: utf-8
import re

from .common import InfoExtractor
from .brightcove import BrightcoveIE
from ..utils import ExtractorError


class EitbIE(InfoExtractor):
    IE_NAME = u'eitb.tv'
    _VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'

    _TEST = {
        u'add_ie': ['Brightcove'],
        u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
        u'md5': u'edf4436247185adee3ea18ce64c47998',
        u'info_dict': {
            u'id': u'2743577154001',
            u'ext': u'mp4',
            u'title': u'60 minutos (Lasa y Zabala, 30 años)',
            # All videos from eitb have this description in the Brightcove info
            u'description': u'.',
            u'uploader': u'Euskal Telebista',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        chapter_id = mobj.group('chapter_id')
        webpage = self._download_webpage(url, chapter_id)
        bc_url = BrightcoveIE._extract_brightcove_url(webpage)
        if bc_url is None:
            raise ExtractorError(u'Could not extract the Brightcove url')
        # The BrightcoveExperience object doesn't contain the video id, we set
        # it manually
        bc_url += '&%40videoPlayer={0}'.format(chapter_id)
        return self.url_result(bc_url, BrightcoveIE.ie_key())
youtube_dl/extractor/escapist.py (new file)
@@ -0,0 +1,84 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,

    ExtractorError,
)


class EscapistIE(InfoExtractor):
    _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
    _TEST = {
        u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
        u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4',
        u'md5': u'ab3a706c681efca53f0a35f1415cf0d1',
        u'info_dict': {
            u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
            u"uploader": u"the-escapist-presents",
            u"title": u"Breaking Down Baldur's Gate"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        showName = mobj.group('showname')
        videoId = mobj.group('episode')

        self.report_extraction(videoId)
        webpage = self._download_webpage(url, videoId)

        videoDesc = self._html_search_regex(
            r'<meta name="description" content="([^"]*)"',
            webpage, u'description', fatal=False)

        playerUrl = self._og_search_video_url(webpage, name=u'player URL')

        title = self._html_search_regex(
            r'<meta name="title" content="([^"]*)"',
            webpage, u'title').split(' : ')[-1]

        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config URL')
        configUrl = compat_urllib_parse.unquote(configUrl)

        formats = []

        def _add_format(name, cfgurl):
            configJSON = self._download_webpage(
                cfgurl, videoId,
                u'Downloading ' + name + ' configuration',
                u'Unable to download ' + name + ' configuration')

            # Technically, it's JavaScript, not JSON
            configJSON = configJSON.replace("'", '"')

            try:
                config = json.loads(configJSON)
            except (ValueError,) as err:
                raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
            playlist = config['playlist']
            formats.append({
                'url': playlist[1]['url'],
                'format_id': name,
            })

        _add_format(u'normal', configUrl)
        hq_url = configUrl + ('&hq=1' if '?' in configUrl else '?hq=1')
        try:
            _add_format(u'hq', hq_url)
        except ExtractorError:
            pass  # That's fine, we'll just use normal quality

        return {
            'id': videoId,
            'formats': formats,
            'uploader': showName,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': videoDesc,
            'player_url': playerUrl,
        }
youtube_dl/extractor/exfm.py (new file)
@@ -0,0 +1,56 @@
import re
import json

from .common import InfoExtractor


class ExfmIE(InfoExtractor):
    IE_NAME = u'exfm'
    IE_DESC = u'ex.fm'
    _VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)'
    _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud.com/tracks/([^/]+)/stream'
    _TESTS = [
        {
            u'url': u'http://ex.fm/song/eh359',
            u'file': u'44216187.mp3',
            u'md5': u'e45513df5631e6d760970b14cc0c11e7',
            u'info_dict': {
                u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
                u"uploader": u"deadjournalist",
                u'upload_date': u'20120424',
                u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
            },
            u'note': u'Soundcloud song',
            u'skip': u'The site is down too often',
        },
        {
            u'url': u'http://ex.fm/song/wddt8',
            u'file': u'wddt8.mp3',
            u'md5': u'966bd70741ac5b8570d8e45bfaed3643',
            u'info_dict': {
                u'title': u'Safe and Sound',
                u'uploader': u'Capital Cities',
            },
            u'skip': u'The site is down too often',
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        song_id = mobj.group(1)
        info_url = "http://ex.fm/api/v3/song/%s" % (song_id)
        webpage = self._download_webpage(info_url, song_id)
        info = json.loads(webpage)
        song_url = info['song']['url']
        if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
            self.to_screen('Soundcloud song detected')
            return self.url_result(song_url.replace('/stream', ''), 'Soundcloud')
        return [{
            'id': song_id,
            'url': song_url,
            'ext': 'mp3',
            'title': info['song']['title'],
            'thumbnail': info['song']['image']['large'],
            'uploader': info['song']['artist'],
            'view_count': info['song']['loved_count'],
        }]
youtube_dl/extractor/extremetube.py (new file)
@@ -0,0 +1,50 @@
import os
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    compat_urllib_parse,
)

class ExtremeTubeIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
    _TEST = {
        u'url': u'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
        u'file': u'652431.mp4',
        u'md5': u'1fb9228f5e3332ec8c057d6ac36f33e0',
        u'info_dict': {
            u"title": u"Music Video 14 british euro brit european cumshots swallow",
            u"uploader": u"unknown",
            u"age_limit": 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')
        url = 'http://www.' + mobj.group('url')

        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        video_title = self._html_search_regex(r'<h1 [^>]*?title="([^"]+)"[^>]*>\1<', webpage, u'title')
        uploader = self._html_search_regex(r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, u'uploader', fatal=False)
        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&', webpage, u'video_url'))
        path = compat_urllib_parse_urlparse(video_url).path
        extension = os.path.splitext(path)[1][1:]
        format = path.split('/')[5].split('_')[:2]
        format = "-".join(format)

        return {
            'id': video_id,
            'title': video_title,
            'uploader': uploader,
            'url': video_url,
            'ext': extension,
            'format': format,
            'format_id': format,
            'age_limit': 18,
        }
youtube_dl/extractor/facebook.py (new file)
@@ -0,0 +1,132 @@
import json
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,

    ExtractorError,
)


class FacebookIE(InfoExtractor):
    """Information Extractor for Facebook"""

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = u'facebook'
    _TEST = {
        u'url': u'https://www.facebook.com/photo.php?v=120708114770723',
        u'file': u'120708114770723.mp4',
        u'md5': u'48975a41ccc4b7a581abd68651c1a5a8',
        u'info_dict': {
            u"duration": 279,
            u"title": u"PEOPLE ARE AWESOME 2013"
        }
    }

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    def _login(self):
        (useremail, password) = self._get_login_info()
        if useremail is None:
            return

        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        login_page_req.add_header('Cookie', 'locale=en_US')
        self.report_login()
        login_page = self._download_webpage(login_page_req, None, note=False,
            errnote=u'Unable to download login page')
        lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = compat_urllib_request.urlopen(request).read()
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            check_form = {
                'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, u'fb_dtsg'),
                'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, u'nh'),
                'name_action_selected': 'dont_save',
                'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, u'continue'),
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = compat_urllib_request.urlopen(check_req).read()
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning(u'Unable to confirm login, you have to log in in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('ID')

        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    u'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError(u'Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse.unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]
        video_url = video_data.get('hd_src')
        if not video_url:
            video_url = video_data['sd_src']
        if not video_url:
            raise ExtractorError(u'Cannot find video URL')
        video_duration = int(video_data['video_duration'])
        thumbnail = video_data['thumbnail_src']

        video_title = self._html_search_regex(
            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, u'title')

        info = {
            'id': video_id,
            'title': video_title,
            'url': video_url,
            'ext': 'mp4',
            'duration': video_duration,
            'thumbnail': thumbnail,
        }
        return [info]
youtube_dl/extractor/faz.py (new file)
@@ -0,0 +1,58 @@
# encoding: utf-8
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    determine_ext,
)


class FazIE(InfoExtractor):
    IE_NAME = u'faz.net'
    _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+).html'

    _TEST = {
        u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
        u'file': u'12610585.mp4',
        u'info_dict': {
            u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
            u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        self.to_screen(video_id)
        webpage = self._download_webpage(url, video_id)
        config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
            u'config xml url')
        config_xml = self._download_webpage(config_xml_url, video_id,
            u'Downloading config xml')
        config = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))

        encodings = config.find('ENCODINGS')
        formats = []
        for code in ['LOW', 'HIGH', 'HQ']:
            encoding = encodings.find(code)
            if encoding is None:
                continue
            encoding_url = encoding.find('FILENAME').text
            formats.append({
                'url': encoding_url,
                'ext': determine_ext(encoding_url),
                'format_id': code.lower(),
            })

        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
        info = {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'description': descr,
            'thumbnail': config.find('STILL/STILL_BIG').text,
        }
        # TODO: Remove when #980 has been merged
        info.update(formats[-1])
        return info
youtube_dl/extractor/fktv.py (new file)
@@ -0,0 +1,78 @@
import re
import random
import json

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    get_element_by_id,
    clean_html,
)


class FKTVIE(InfoExtractor):
    IE_NAME = u'fernsehkritik.tv'
    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'

    _TEST = {
        u'url': u'http://fernsehkritik.tv/folge-1',
        u'file': u'00011.flv',
        u'info_dict': {
            u'title': u'Folge 1 vom 10. April 2007',
            u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        episode = int(mobj.group('ep'))

        server = random.randint(2, 4)
        video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
        start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
            episode)
        playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
            u'playlist', flags=re.DOTALL)
        files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
        # TODO: return a single multipart video
        videos = []
        for i, _ in enumerate(files, 1):
            video_id = '%04d%d' % (episode, i)
            video_url = 'http://dl%d.fernsehkritik.tv/fernsehkritik%d%s.flv' % (server, episode, '' if i == 1 else '-%d' % i)
            videos.append({
                'id': video_id,
                'url': video_url,
                'ext': determine_ext(video_url),
                'title': clean_html(get_element_by_id('eptitle', start_webpage)),
                'description': clean_html(get_element_by_id('contentlist', start_webpage)),
                'thumbnail': video_thumbnail
            })
        return videos


class FKTVPosteckeIE(InfoExtractor):
    IE_NAME = u'fernsehkritik.tv:postecke'
    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik.tv/inline-video/postecke.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
    _TEST = {
        u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
        u'file': u'0120.flv',
        u'md5': u'262f0adbac80317412f7e57b4808e5c4',
        u'info_dict': {
            u"title": u"Postecke 120"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        episode = int(mobj.group('ep'))

        server = random.randint(2, 4)
        video_id = '%04d' % episode
        video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
        video_title = 'Postecke %d' % episode
        return {
            'id': video_id,
            'url': video_url,
            'ext': determine_ext(video_url),
            'title': video_title,
        }
youtube_dl/extractor/flickr.py (new file)
@@ -0,0 +1,58 @@
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unescapeHTML,
)


class FlickrIE(InfoExtractor):
    """Information Extractor for Flickr videos"""
    _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
    _TEST = {
        u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
        u'file': u'5645318632.mp4',
        u'md5': u'6fdc01adbc89d72fc9c4f15b4a4ba87b',
        u'info_dict': {
            u"description": u"Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
            u"uploader_id": u"forestwander-nature-pictures",
            u"title": u"Dark Hollow Waterfalls"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        video_uploader_id = mobj.group('uploader_id')
        webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')

        first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')

        node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
            first_xml, u'node_id')

        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')

        self.report_extraction(video_id)

        mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video url')
        video_url = mobj.group(1) + unescapeHTML(mobj.group(2))

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader_id': video_uploader_id,
        }]
youtube_dl/extractor/francetv.py (new file)
@@ -0,0 +1,129 @@
# encoding: utf-8
import re
import xml.etree.ElementTree
import json

from .common import InfoExtractor
from ..utils import (
    compat_urlparse,
)


class FranceTVBaseInfoExtractor(InfoExtractor):
    def _extract_video(self, video_id):
        xml_desc = self._download_webpage(
            'http://www.francetvinfo.fr/appftv/webservices/video/'
            'getInfosOeuvre.php?id-diffusion='
            + video_id, video_id, 'Downloading XML config')
        info = xml.etree.ElementTree.fromstring(xml_desc.encode('utf-8'))

        manifest_url = info.find('videos/video/url').text
        video_url = manifest_url.replace('manifest.f4m', 'index_2_av.m3u8')
        video_url = video_url.replace('/z/', '/i/')
        thumbnail_path = info.find('image').text

        return {'id': video_id,
                'ext': 'mp4',
                'url': video_url,
                'title': info.find('titre').text,
                'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
                'description': info.find('synopsis').text,
                }


class PluzzIE(FranceTVBaseInfoExtractor):
    IE_NAME = u'pluzz.francetv.fr'
    _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'

    # Can't use tests, videos expire in 7 days

    def _real_extract(self, url):
        title = re.match(self._VALID_URL, url).group(1)
        webpage = self._download_webpage(url, title)
        video_id = self._search_regex(
            r'data-diffusion="(\d+)"', webpage, 'ID')
        return self._extract_video(video_id)


class FranceTvInfoIE(FranceTVBaseInfoExtractor):
    IE_NAME = u'francetvinfo.fr'
    _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+).html'

    _TEST = {
        u'url': u'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
        u'file': u'84981923.mp4',
        u'info_dict': {
            u'title': u'Soir 3',
        },
        u'params': {
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_title = mobj.group('title')
        webpage = self._download_webpage(url, page_title)
        video_id = self._search_regex(r'id-video=(\d+?)"', webpage, u'video id')
        return self._extract_video(video_id)


class France2IE(FranceTVBaseInfoExtractor):
    IE_NAME = u'france2.fr'
    _VALID_URL = r'''(?x)https?://www\.france2\.fr/
        (?:
            emissions/.*?/videos/(?P<id>\d+)
        |   emission/(?P<key>[^/?]+)
        )'''

    _TEST = {
        u'url': u'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
        u'file': u'75540104.mp4',
        u'info_dict': {
            u'title': u'13h15, le samedi...',
            u'description': u'md5:2e5b58ba7a2d3692b35c792be081a03d',
        },
        u'params': {
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj.group('key'):
            webpage = self._download_webpage(url, mobj.group('key'))
            video_id = self._html_search_regex(
                r'''(?x)<div\s+class="video-player">\s*
                    <a\s+href="http://videos.francetv.fr/video/([0-9]+)"\s+
                    class="francetv-video-player">''',
                webpage, u'video ID')
        else:
            video_id = mobj.group('id')
        return self._extract_video(video_id)


class GenerationQuoiIE(InfoExtractor):
    IE_NAME = u'france2.fr:generation-quoi'
    _VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<name>.*)(\?|$)'

    _TEST = {
        u'url': u'http://generation-quoi.france2.fr/portrait/garde-a-vous',
        u'file': u'k7FJX8VBcvvLmX4wA5Q.mp4',
        u'info_dict': {
            u'title': u'Génération Quoi - Garde à Vous',
            u'uploader': u'Génération Quoi',
        },
        u'params': {
            # It uses Dailymotion
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % name)
        info_json = self._download_webpage(info_url, name)
        info = json.loads(info_json)
        return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
            ie='Dailymotion')
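The base extractor above derives an HLS URL from the Akamai f4m manifest URL by swapping the manifest name and the /z/ path segment. A one-line sanity check of that rewrite on a hypothetical manifest URL:

# hypothetical manifest URL, only used to illustrate the string rewrite above
manifest_url = 'http://example-hds.akamaihd.net/z/some/show_,700,1200,.mp4.csmil/manifest.f4m'
video_url = manifest_url.replace('manifest.f4m', 'index_2_av.m3u8').replace('/z/', '/i/')
print(video_url)  # -> http://example-hds.akamaihd.net/i/some/show_,700,1200,.mp4.csmil/index_2_av.m3u8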
youtube_dl/extractor/freesound.py (new file)
@@ -0,0 +1,36 @@
import re

from .common import InfoExtractor
from ..utils import determine_ext

class FreesoundIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
    _TEST = {
        u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/',
        u'file': u'194503.mp3',
        u'md5': u'12280ceb42c81f19a515c745eae07650',
        u'info_dict': {
            u"title": u"gulls in the city.wav",
            u"uploader": u"miklovan",
            u'description': u'the sounds of seagulls in the city',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        music_id = mobj.group('id')
        webpage = self._download_webpage(url, music_id)
        title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
            webpage, 'music title', flags=re.DOTALL)
        music_url = self._og_search_property('audio', webpage, 'music url')
        description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>',
            webpage, 'description', fatal=False, flags=re.DOTALL)

        return [{
            'id': music_id,
            'title': title,
            'url': music_url,
            'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
            'ext': determine_ext(music_url),
            'description': description,
        }]
youtube_dl/extractor/funnyordie.py (new file)
@@ -0,0 +1,35 @@
import re

from .common import InfoExtractor


class FunnyOrDieIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
    _TEST = {
        u'url': u'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
        u'file': u'0732f586d7.mp4',
        u'md5': u'f647e9e90064b53b6e046e75d0241fbd',
        u'info_dict': {
            u"description": u"Lyrics changed to match the video. Spoken cameo by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a concept by Dustin McLean (DustFilms.com). Performed, edited, and written by David A. Scott.",
            u"title": u"Heart-Shaped Box: Literal Video Version"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(
            [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''],
            webpage, u'video URL', flags=re.DOTALL)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
        }
        return [info]
youtube_dl/extractor/gamekings.py (new file)
@@ -0,0 +1,38 @@
import re

from .common import InfoExtractor


class GamekingsIE(InfoExtractor):
    _VALID_URL = r'http?://www\.gamekings\.tv/videos/(?P<name>[0-9a-z\-]+)'
    _TEST = {
        u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/",
        u'file': u'20130811.mp4',
        # MD5 is flaky, seems to change regularly
        #u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',
        u'info_dict': {
            u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review",
            u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.",
        }
    }

    def _real_extract(self, url):

        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        webpage = self._download_webpage(url, name)
        video_url = self._og_search_video_url(webpage)

        video = re.search(r'[0-9]+', video_url)
        video_id = video.group(0)

        # Todo: add medium format
        video_url = video_url.replace(video_id, 'large/' + video_id)

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
        }
youtube_dl/extractor/gamespot.py (new file)
@@ -0,0 +1,59 @@
import re
import json

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urlparse,
    unescapeHTML,
    get_meta_content,
)


class GameSpotIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
    _TEST = {
        u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
        u"file": u"gs-2300-6410818.mp4",
        u"md5": u"b2a30deaa8654fcccd43713a6b6a4825",
        u"info_dict": {
            u"title": u"Arma 3 - Community Guide: SITREP I",
            u'description': u'Check out this video where some of the basics of Arma 3 is explained.',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_id = mobj.group('page_id')
        webpage = self._download_webpage(url, page_id)
        data_video_json = self._search_regex(r'data-video=\'(.*?)\'', webpage, u'data video')
        data_video = json.loads(unescapeHTML(data_video_json))

        # Transform the manifest url to a link to the mp4 files
        # they are used in mobile devices.
        f4m_url = data_video['videoStreams']['f4m_stream']
        f4m_path = compat_urlparse.urlparse(f4m_url).path
        QUALITIES_RE = r'((,\d+)+,?)'
        qualities = self._search_regex(QUALITIES_RE, f4m_path, u'qualities').strip(',').split(',')
        http_path = f4m_path[1:].split('/', 1)[1]
        http_template = re.sub(QUALITIES_RE, r'%s', http_path)
        http_template = http_template.replace('.csmil/manifest.f4m', '')
        http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template)
        formats = []
        for q in qualities:
            formats.append({
                'url': http_template % q,
                'ext': 'mp4',
                'format_id': q,
            })

        info = {
            'id': data_video['guid'],
            'title': compat_urllib_parse.unquote(data_video['title']),
            'formats': formats,
            'description': get_meta_content('description', webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
        # TODO: Remove when #980 has been merged
        info.update(formats[-1])
        return info
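The quality list is read straight out of the f4m manifest path and substituted back into an HTTP URL template. A minimal sketch of that transformation on a made-up manifest path (the real path layout on gamespot.com may differ):

import re

f4m_path = '/videos/gs-2300-6410818_,300,800,1500,.mp4.csmil/manifest.f4m'  # hypothetical example path
QUALITIES_RE = r'((,\d+)+,?)'
qualities = re.search(QUALITIES_RE, f4m_path).group(1).strip(',').split(',')
http_template = re.sub(QUALITIES_RE, r'%s', f4m_path[1:].split('/', 1)[1])
http_template = http_template.replace('.csmil/manifest.f4m', '')
print([http_template % q for q in qualities])
# -> ['gs-2300-6410818_300.mp4', 'gs-2300-6410818_800.mp4', 'gs-2300-6410818_1500.mp4']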
youtube_dl/extractor/gametrailers.py (new file)
@@ -0,0 +1,36 @@
import re

from .mtv import MTVIE, _media_xml_tag

class GametrailersIE(MTVIE):
    """
    Gametrailers uses the same video system as MTVIE; this subclass only
    changes the feed URL, where the URI is found, and the method used to get
    the thumbnails.
    """
    _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
    _TEST = {
        u'url': u'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
        u'file': u'70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.mp4',
        u'md5': u'4c8e67681a0ea7ec241e8c09b3ea8cf7',
        u'info_dict': {
            u'title': u'E3 2013: Debut Trailer',
            u'description': u'Faith is back! Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
        },
    }
    # Overwrite MTVIE properties we don't want
    _TESTS = []

    _FEED_URL = 'http://www.gametrailers.com/feeds/mrss'

    def _get_thumbnail_url(self, uri, itemdoc):
        search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
        return itemdoc.find(search_path).attrib['url']

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        mgid = self._search_regex([r'data-video="(?P<mgid>mgid:.*?)"',
                                   r'data-contentId=\'(?P<mgid>mgid:.*?)\''],
                                  webpage, u'mgid')
        return self._get_videos_info(mgid)
youtube_dl/extractor/generic.py (new file)
@@ -0,0 +1,251 @@
# encoding: utf-8

import os
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,

    ExtractorError,
    smuggle_url,
    unescapeHTML,
)
from .brightcove import BrightcoveIE


class GenericIE(InfoExtractor):
    IE_DESC = u'Generic downloader that works on some sites'
    _VALID_URL = r'.*'
    IE_NAME = u'generic'
    _TESTS = [
        {
            u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
            u'file': u'13601338388002.mp4',
            u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',
            u'info_dict': {
                u"uploader": u"www.hodiho.fr",
                u"title": u"R\u00e9gis plante sa Jeep"
            }
        },
        # embedded vimeo video
        {
            u'add_ie': ['Vimeo'],
            u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references',
            u'file': u'22444065.mp4',
            u'md5': u'2903896e23df39722c33f015af0666e2',
            u'info_dict': {
                u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011',
                u"uploader_id": u"skillsmatter",
                u"uploader": u"Skills Matter",
            }
        },
        # bandcamp page with custom domain
        {
            u'add_ie': ['Bandcamp'],
            u'url': u'http://bronyrock.com/track/the-pony-mash',
            u'file': u'3235767654.mp3',
            u'info_dict': {
                u'title': u'The Pony Mash',
                u'uploader': u'M_Pallante',
            },
            u'skip': u'There is a limit of 200 free downloads / month for the test song',
        },
        # embedded brightcove video
        # it also tests brightcove videos that need to set the 'Referer' in the
        # http requests
        {
            u'add_ie': ['Brightcove'],
            u'url': u'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
            u'info_dict': {
                u'id': u'2765128793001',
                u'ext': u'mp4',
                u'title': u'Le cours de bourse : l’analyse technique',
                u'description': u'md5:7e9ad046e968cb2d1114004aba466fd9',
                u'uploader': u'BFM BUSINESS',
            },
            u'params': {
                u'skip_download': True,
            },
        },
    ]

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning(u'Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

    def _test_redirect(self, url):
        """Check if it is a redirect, like url shorteners, in case return the new url."""
        class HeadRequest(compat_urllib_request.Request):
            def get_method(self):
                return "HEAD"

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HeadRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HeadRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())

        response = opener.open(HeadRequest(url))
        if response is None:
            raise ExtractorError(u'Invalid URL protocol')
        new_url = response.geturl()

        if url == new_url:
            return False

        self.report_following_redirect(new_url)
        return new_url

    def _real_extract(self, url):
        parsed_url = compat_urlparse.urlparse(url)
        if not parsed_url.scheme:
            self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
            return self.url_result('http://' + url)

        try:
            new_url = self._test_redirect(url)
            if new_url:
                return [self.url_result(new_url)]
        except compat_urllib_error.HTTPError:
            # This may be a stupid server that doesn't like HEAD, our UA, or so
            pass

        video_id = url.split('/')[-1]
        try:
            webpage = self._download_webpage(url, video_id)
        except ValueError:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            raise ExtractorError(u'Failed to download URL: %s' % url)

        self.report_extraction(video_id)

        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
        #   Site Name | Video Title
        #   Video Title - Tagline | Site Name
        # and so on and so forth; it's just not practical
        video_title = self._html_search_regex(r'<title>(.*)</title>',
            webpage, u'video title', default=u'video', flags=re.DOTALL)

        # Look for BrightCove:
        bc_url = BrightcoveIE._extract_brightcove_url(webpage)
        if bc_url is not None:
            self.to_screen(u'Brightcove video detected.')
            return self.url_result(bc_url, 'Brightcove')

        # Look for embedded Vimeo player
        mobj = re.search(
            r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage)
        if mobj:
            player_url = unescapeHTML(mobj.group(1))
            surl = smuggle_url(player_url, {'Referer': url})
            return self.url_result(surl, 'Vimeo')

        # Look for embedded YouTube player
        matches = re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube.com/embed/.+?)\1', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
                     for tuppl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for Bandcamp pages with custom domain
        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
        if mobj is not None:
            burl = unescapeHTML(mobj.group(1))
            # Don't set the extractor because it can be a track url or an album
            return self.url_result(burl)

        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http[^\'"]*)', webpage)
        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info:
            # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
        if mobj is None:
            # HTML5 video
            mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
        if mobj is None:
            raise ExtractorError(u'Unsupported URL: %s' % url)

        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            raise ExtractorError(u'Did not find a valid video URL at %s' % url)

        video_url = mobj.group(1)
        video_url = compat_urlparse.urljoin(url, video_url)
        video_id = compat_urllib_parse.unquote(os.path.basename(video_url))

        # here's a fun little line of code for you:
        video_id = os.path.splitext(video_id)[0]

        # video uploader is domain name
        video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
            url, u'video uploader')

        return {
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
        }
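_test_redirect resolves URL shorteners by issuing a HEAD request and comparing the final URL with the original. A minimal standalone sketch of the same check, using the Python 3 stdlib directly (youtube-dl itself goes through its compat_urllib wrappers and adds the GET fallback above for servers that reject HEAD):

import urllib.request

def resolve_redirect(url):
    # Issue a HEAD request; urllib follows redirects automatically.
    req = urllib.request.Request(url, method='HEAD')
    with urllib.request.urlopen(req) as resp:
        final_url = resp.geturl()
    # Mirror _test_redirect: return False when nothing changed.
    return final_url if final_url != url else False

# print(resolve_redirect('http://t.co/somecode'))  # hypothetical shortener URL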
youtube_dl/extractor/googleplus.py (new file)
@@ -0,0 +1,98 @@
# coding: utf-8

import datetime
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class GooglePlusIE(InfoExtractor):
    IE_DESC = u'Google Plus'
    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'
    _TEST = {
        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
        u"file": u"ZButuJc6CtH.flv",
        u"info_dict": {
            u"upload_date": u"20120613",
            u"uploader": u"井上ヨシマサ",
            u"title": u"嘆きの天使 降臨"
        }
    }

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'

        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')

        self.report_extraction(video_id)

        # Extract update date
        upload_date = self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
        if upload_date:
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')

        # Extract uploader
        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
            webpage, u'uploader', fatal=False)

        # Extract title
        # Get the first line for title
        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
            webpage, 'title', default=u'NA')

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, u'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')

        # Extract video links of all sizes from the video page
        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            raise ExtractorError(u'Unable to extract video links')

        # Sort in resolution
        links = sorted(mobj)

        # Choose the lowest of the sort, i.e. highest resolution
        video_url = links[-1]
        # Only get the url. The resolution part in the tuple has no use anymore
        video_url = video_url[-1]
        # Treat escaped \u0026 style hex
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': video_extension,
        }]
youtube_dl/extractor/googlesearch.py (new file)
@@ -0,0 +1,39 @@
import itertools
import re

from .common import SearchInfoExtractor
from ..utils import (
    compat_urllib_parse,
)


class GoogleSearchIE(SearchInfoExtractor):
    IE_DESC = u'Google Video search'
    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
    _MAX_RESULTS = 1000
    IE_NAME = u'video.google:search'
    _SEARCH_KEY = 'gvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        res = {
            '_type': 'playlist',
            'id': query,
            'entries': []
        }

        for pagenum in itertools.count(1):
            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
                note='Downloading result page ' + str(pagenum))

            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
                e = {
                    '_type': 'url',
                    'url': mobj.group(1)
                }
                res['entries'].append(e)

            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
                return res
youtube_dl/extractor/hark.py (new file)
@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-

import re
import json

from .common import InfoExtractor
from ..utils import determine_ext

class HarkIE(InfoExtractor):
    _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
    _TEST = {
        u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
        u'file': u'mmbzyhkgny.mp3',
        u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
        u'info_dict': {
            u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
            u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
            u'duration': 11,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        json_url = "http://www.hark.com/clips/%s.json" % (video_id)
        info_json = self._download_webpage(json_url, video_id)
        info = json.loads(info_json)
        final_url = info['url']

        return {'id': video_id,
                'url': final_url,
                'title': info['name'],
                'ext': determine_ext(final_url),
                'description': info['description'],
                'thumbnail': info['image_original'],
                'duration': info['duration'],
                }
youtube_dl/extractor/hotnewhiphop.py (new file)
@@ -0,0 +1,44 @@
import re
import base64

from .common import InfoExtractor


class HotNewHipHopIE(InfoExtractor):
    _VALID_URL = r'http://www\.hotnewhiphop.com/.*\.(?P<id>.*)\.html'
    _TEST = {
        u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html",
        u'file': u'1435540.mp3',
        u'md5': u'2c2cd2f76ef11a9b3b581e8b232f3d96',
        u'info_dict': {
            u"title": u"Freddie Gibbs - Lay It Down"
        }
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage_src = self._download_webpage(url, video_id)

        video_url_base64 = self._search_regex(r'data-path="(.*?)"',
            webpage_src, u'video URL', fatal=False)

        if video_url_base64 is None:
            video_url = self._search_regex(r'"contentUrl" content="(.*?)"', webpage_src,
                u'video URL')
            return self.url_result(video_url, ie='Youtube')

        video_url = base64.b64decode(video_url_base64).decode('utf-8')

        video_title = self._html_search_regex(r"<title>(.*)</title>",
            webpage_src, u'title')

        results = [{
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'thumbnail': self._og_search_thumbnail(webpage_src),
            'ext': 'mp3',
        }]
        return results
youtube_dl/extractor/howcast.py (new file)
@@ -0,0 +1,45 @@
import re

from .common import InfoExtractor


class HowcastIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
    _TEST = {
        u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
        u'file': u'390161.mp4',
        u'md5': u'8b743df908c42f60cf6496586c7f12c3',
        u'info_dict': {
            u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.",
            u"title": u"How to Tie a Square Knot Properly"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
            webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
            webpage, u'title')

        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
            webpage, u'description', fatal=False)

        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
        }]
71
youtube_dl/extractor/hypem.py
Normal file
71
youtube_dl/extractor/hypem.py
Normal file
@@ -0,0 +1,71 @@
import json
import re
import time

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,
    compat_urllib_request,

    ExtractorError,
)


class HypemIE(InfoExtractor):
    """Information Extractor for hypem"""
    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
    _TEST = {
        u'url': u'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
        u'file': u'1v6ga.mp3',
        u'md5': u'b9cc91b5af8995e9f0c1cee04c575828',
        u'info_dict': {
            u"title": u"Tame"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        track_id = mobj.group(1)

        data = {'ax': 1, 'ts': time.time()}
        data_encoded = compat_urllib_parse.urlencode(data)
        complete_url = url + "?" + data_encoded
        request = compat_urllib_request.Request(complete_url)
        response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
        cookie = urlh.headers.get('Set-Cookie', '')

        self.report_extraction(track_id)

        html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
            response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
        try:
            track_list = json.loads(html_tracks)
            track = track_list[u'tracks'][0]
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')

        key = track[u"key"]
        track_id = track[u"id"]
        artist = track[u"artist"]
        title = track[u"song"]

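        # Request the track metadata from the serve/source endpoint, passing
        # along the session cookie captured from the initial page request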
        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
        request = compat_urllib_request.Request(serve_url, "", {'Content-Type': 'application/json'})
        request.add_header('cookie', cookie)
        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
        try:
            song_data = json.loads(song_data_json)
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')
        final_url = song_data[u"url"]

        return [{
            'id': track_id,
            'url': final_url,
            'ext': "mp3",
            'title': title,
            'artist': artist,
        }]
129
youtube_dl/extractor/ign.py
Normal file
@@ -0,0 +1,129 @@
import re
import json

from .common import InfoExtractor
from ..utils import (
    determine_ext,
)


class IGNIE(InfoExtractor):
    """
    Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
    Some videos of it.ign.com are also supported
    """

    _VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)'
    IE_NAME = u'ign.com'

    _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config'
    _DESCRIPTION_RE = [r'<span class="page-object-description">(.+?)</span>',
                       r'id="my_show_video">.*?<p>(.*?)</p>',
                       ]

    _TESTS = [
        {
            u'url': u'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
            u'file': u'8f862beef863986b2785559b9e1aa599.mp4',
            u'md5': u'eac8bdc1890980122c3b66f14bdd02e9',
            u'info_dict': {
                u'title': u'The Last of Us Review',
                u'description': u'md5:c8946d4260a4d43a00d5ae8ed998870c',
            }
        },
        {
            u'url': u'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
            u'playlist': [
                {
                    u'file': u'5ebbd138523268b93c9141af17bec937.mp4',
                    u'info_dict': {
                        u'title': u'GTA 5 Video Review',
                        u'description': u'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
                    },
                },
                {
                    u'file': u'638672ee848ae4ff108df2a296418ee2.mp4',
                    u'info_dict': {
                        u'title': u'GTA 5\'s Twisted Beauty in Super Slow Motion',
                        u'description': u'The twisted beauty of GTA 5 in stunning slow motion.',
                    },
                },
            ],
            u'params': {
                u'skip_download': True,
            },
        },
    ]

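    # The video id is embedded in different places depending on the page
    # layout, so try a list of patterns and use the first one that matches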
    def _find_video_id(self, webpage):
        res_id = [r'data-video-id="(.+?)"',
                  r'<object id="vid_(.+?)"',
                  r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
                  ]
        return self._search_regex(res_id, webpage, 'video id')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name_or_id = mobj.group('name_or_id')
        page_type = mobj.group('type')
        webpage = self._download_webpage(url, name_or_id)
        if page_type == 'articles':
            video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, u'video url')
            return self.url_result(video_url, ie='IGN')
        elif page_type != 'video':
            multiple_urls = re.findall(
                '<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
                webpage)
            if multiple_urls:
                return [self.url_result(u, ie='IGN') for u in multiple_urls]

        video_id = self._find_video_id(webpage)
        result = self._get_video_info(video_id)
        description = self._html_search_regex(self._DESCRIPTION_RE,
            webpage, 'video description',
            flags=re.DOTALL)
        result['description'] = description
        return result

    def _get_video_info(self, video_id):
        config_url = self._CONFIG_URL_TEMPLATE % video_id
        config = json.loads(self._download_webpage(config_url, video_id,
            u'Downloading video info'))
        media = config['playlist']['media']
        video_url = media['url']

        return {'id': media['metadata']['videoId'],
                'url': video_url,
                'ext': determine_ext(video_url),
                'title': media['metadata']['title'],
                'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
                }


class OneUPIE(IGNIE):
    """Extractor for 1up.com, it uses the ign videos system."""

    _VALID_URL = r'https?://gamevideos.1up.com/(?P<type>video)/id/(?P<name_or_id>.+)'
    IE_NAME = '1up.com'

    _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'

    _TEST = {
        u'url': u'http://gamevideos.1up.com/video/id/34976',
        u'file': u'34976.mp4',
        u'md5': u'68a54ce4ebc772e4b71e3123d413163d',
        u'info_dict': {
            u'title': u'Sniper Elite V2 - Trailer',
            u'description': u'md5:5d289b722f5a6d940ca3136e9dae89cf',
        }
    }

    # Override IGN tests
    _TESTS = []

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        id = mobj.group('name_or_id')
        result = super(OneUPIE, self)._real_extract(url)
        result['id'] = id
        return result
39
youtube_dl/extractor/ina.py
Normal file
@@ -0,0 +1,39 @@
import re

from .common import InfoExtractor


class InaIE(InfoExtractor):
    """Information Extractor for Ina.fr"""
    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I?[A-F0-9]+)/.*'
    _TEST = {
        u'url': u'www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
        u'file': u'I12055569.mp4',
        u'md5': u'a667021bf2b41f8dc6049479d9bb38a3',
        u'info_dict': {
            u"title": u"Fran\u00e7ois Hollande \"Je crois que c'est clair\""
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
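        # The video metadata is published as an MRSS feed on player.ina.fr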
        mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
        video_extension = 'mp4'
        webpage = self._download_webpage(mrss_url, video_id)

        self.report_extraction(video_id)

        video_url = self._html_search_regex(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)',
            webpage, u'video URL')

        video_title = self._search_regex(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>',
            webpage, u'title')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
        }]
62
youtube_dl/extractor/infoq.py
Normal file
@@ -0,0 +1,62 @@
import base64
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,

    ExtractorError,
)


class InfoQIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
    _TEST = {
        u"name": u"InfoQ",
        u"url": u"http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
        u"file": u"12-jan-pythonthings.mp4",
        u"info_dict": {
            u"description": u"Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
            u"title": u"A Few of My Favorite [Python] Things"
        },
        u"params": {
            u"skip_download": True
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        webpage = self._download_webpage(url, video_id=url)
        self.report_extraction(url)

        # Extract video URL
        mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video url')
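        # The id stored in jsclassref is base64-encoded and URL-quoted;
        # decode it to build the RTMP URL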
        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id

        # Extract title
        video_title = self._search_regex(r'contentTitle = "(.*?)";',
            webpage, u'title')

        # Extract description
        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
            webpage, u'description', fatal=False)

        video_filename = video_url.split('/')[-1]
        video_id, extension = video_filename.split('.')

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': extension,  # Extension is always(?) mp4, but seems to be flv
            'thumbnail': None,
            'description': video_description,
        }

        return [info]
35
youtube_dl/extractor/instagram.py
Normal file
@@ -0,0 +1,35 @@
import re

from .common import InfoExtractor

class InstagramIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?instagram.com/p/(.*?)/'
    _TEST = {
        u'url': u'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
        u'file': u'aye83DjauH.mp4',
        u'md5': u'0d2da106a9d2631273e192b372806516',
        u'info_dict': {
            u"uploader_id": u"naomipq",
            u"title": u"Video by naomipq",
            u'description': u'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
            webpage, u'uploader id', fatal=False)
        desc = self._search_regex(r'"caption":"(.*?)"', webpage, u'description',
            fatal=False)

        return [{
            'id': video_id,
            'url': self._og_search_video_url(webpage, secure=False),
            'ext': 'mp4',
            'title': u'Video by %s' % uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader_id': uploader_id,
            'description': desc,
        }]
84
youtube_dl/extractor/internetvideoarchive.py
Normal file
@@ -0,0 +1,84 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_urlparse,
    compat_urllib_parse,
    xpath_with_ns,
    determine_ext,
)


class InternetVideoArchiveIE(InfoExtractor):
    _VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'

    _TEST = {
        u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
        u'file': u'452693.mp4',
        u'info_dict': {
            u'title': u'SKYFALL',
            u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
            u'duration': 153,
        },
    }

    @staticmethod
    def _build_url(query):
        return 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?' + query

    @staticmethod
    def _clean_query(query):
        NEEDED_ARGS = ['publishedid', 'customerid']
        query_dic = compat_urlparse.parse_qs(query)
        cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
        # Other player ids return m3u8 urls
        cleaned_dic['playerid'] = '247'
        cleaned_dic['videokbrate'] = '100000'
        return compat_urllib_parse.urlencode(cleaned_dic)

    def _real_extract(self, url):
        query = compat_urlparse.urlparse(url).query
        query_dic = compat_urlparse.parse_qs(query)
        video_id = query_dic['publishedid'][0]
        url = self._build_url(query)

        flashconfiguration_xml = self._download_webpage(url, video_id,
            u'Downloading flash configuration')
        flashconfiguration = xml.etree.ElementTree.fromstring(flashconfiguration_xml.encode('utf-8'))
        file_url = flashconfiguration.find('file').text
        file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
        # Replace some of the parameters in the query to get the best quality
        # and http links (no m3u8 manifests)
        file_url = re.sub(r'(?<=\?)(.+)$',
            lambda m: self._clean_query(m.group()),
            file_url)
        info_xml = self._download_webpage(file_url, video_id,
            u'Downloading video info')
        info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8'))
        item = info.find('channel/item')

        def _bp(p):
            return xpath_with_ns(p,
                {'media': 'http://search.yahoo.com/mrss/',
                 'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
        formats = []
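        # Each media:content element describes one rendition (URL, width, bitrate)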
        for content in item.findall(_bp('media:group/media:content')):
            attr = content.attrib
            f_url = attr['url']
            formats.append({
                'url': f_url,
                'ext': determine_ext(f_url),
                'width': int(attr['width']),
                'bitrate': int(attr['bitrate']),
            })
        formats = sorted(formats, key=lambda f: f['bitrate'])

        return {
            'id': video_id,
            'title': item.find('title').text,
            'formats': formats,
            'thumbnail': item.find(_bp('media:thumbnail')).attrib['url'],
            'description': item.find('description').text,
            'duration': int(attr['duration']),
        }
52
youtube_dl/extractor/jeuxvideo.py
Normal file
@@ -0,0 +1,52 @@
# coding: utf-8

import json
import re
import xml.etree.ElementTree

from .common import InfoExtractor


class JeuxVideoIE(InfoExtractor):
    _VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm'

    _TEST = {
        u'url': u'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
        u'file': u'5182.mp4',
        u'md5': u'046e491afb32a8aaac1f44dd4ddd54ee',
        u'info_dict': {
            u'title': u'GC 2013 : Tearaway nous présente ses papiers d\'identité',
            u'description': u'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group(1)
        webpage = self._download_webpage(url, title)
        xml_link = self._html_search_regex(
            r'<param name="flashvars" value="config=(.*?)" />',
            webpage, u'config URL')

        video_id = self._search_regex(
            r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
            xml_link, u'video ID')

        xml_config = self._download_webpage(
            xml_link, title, u'Downloading XML config')
        config = xml.etree.ElementTree.fromstring(xml_config.encode('utf-8'))
        info_json = self._search_regex(
            r'(?sm)<format\.json>(.*?)</format\.json>',
            xml_config, u'JSON information')
        info = json.loads(info_json)['versions'][0]

        video_url = 'http://video720.jeuxvideo.com/' + info['file']

        return {
            'id': video_id,
            'title': config.find('titre_video').text,
            'ext': 'mp4',
            'url': video_url,
            'description': self._og_search_description(webpage),
            'thumbnail': config.find('image').text,
        }
56
youtube_dl/extractor/jukebox.py
Normal file
@@ -0,0 +1,56 @@
# coding: utf-8
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unescapeHTML,
)

class JukeboxIE(InfoExtractor):
    _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+).html'
    _IFRAME = r'<iframe .*src="(?P<iframe>[^"]*)".*>'
    _VIDEO_URL = r'"config":{"file":"(?P<video_url>http:[^"]+[.](?P<video_ext>[^.?]+)[?]mdtk=[0-9]+)"'
    _TITLE = r'<h1 class="inline">(?P<title>[^<]+)</h1>.*<span id="infos_article_artist">(?P<artist>[^<]+)</span>'
    _IS_YOUTUBE = r'config":{"file":"(?P<youtube_url>http:[\\][/][\\][/]www[.]youtube[.]com[\\][/]watch[?]v=[^"]+)"'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        html = self._download_webpage(url, video_id)

        mobj = re.search(self._IFRAME, html)
        if mobj is None:
            raise ExtractorError(u'Cannot extract iframe url')
        iframe_url = unescapeHTML(mobj.group('iframe'))

        iframe_html = self._download_webpage(iframe_url, video_id, 'Downloading iframe')
        mobj = re.search(r'class="jkb_waiting"', iframe_html)
        if mobj is not None:
            raise ExtractorError(u'Video is not available (in your country?)!')

        self.report_extraction(video_id)

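        # Look for a direct media URL first; some tracks only embed a YouTube
        # player, in which case extraction is delegated to the YouTube IE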
        mobj = re.search(self._VIDEO_URL, iframe_html)
        if mobj is None:
            mobj = re.search(self._IS_YOUTUBE, iframe_html)
            if mobj is None:
                raise ExtractorError(u'Cannot extract video url')
            youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\/', '/')
            self.to_screen(u'Youtube video detected')
            return self.url_result(youtube_url, ie='Youtube')
        video_url = unescapeHTML(mobj.group('video_url')).replace('\/', '/')
        video_ext = unescapeHTML(mobj.group('video_ext'))

        mobj = re.search(self._TITLE, html)
        if mobj is None:
            raise ExtractorError(u'Cannot extract title')
        title = unescapeHTML(mobj.group('title'))
        artist = unescapeHTML(mobj.group('artist'))

        return [{'id': video_id,
                 'url': video_url,
                 'title': artist + '-' + title,
                 'ext': video_ext
                 }]
155
youtube_dl/extractor/justintv.py
Normal file
@@ -0,0 +1,155 @@
import json
import os
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    formatSeconds,
)


class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        (?:
            (?P<channelid>[^/]+)|
            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
        )
        /?(?:\#.*)?$
        """
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = u'justin.tv'
    _TEST = {
        u'url': u'http://www.twitch.tv/thegamedevhub/b/296128360',
        u'file': u'296128360.flv',
        u'md5': u'ecaa8a790c22a40770901460af191c9a',
        u'info_dict': {
            u"upload_date": u"20110927",
            u"uploader_id": 25114803,
            u"uploader": u"thegamedevhub",
            u"title": u"Beginner Series - Scripting With Python Pt.1"
        }
    }

    def report_download_page(self, channel, offset):
        """Report attempt to download a single page of videos."""
        self.to_screen(u'%s: Downloading video information from %d to %d' %
            (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))

    # Return count of items, list of *valid* items
    def _parse_page(self, url, video_id):
        info_json = self._download_webpage(url, video_id,
            u'Downloading video info JSON',
            u'unable to download video info JSON')

        response = json.loads(info_json)
        if type(response) != list:
            error_text = response.get('error', 'unknown error')
            raise ExtractorError(u'Justin.tv API: %s' % error_text)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['start_time'][:10])
                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                video_id = clip['id']
                video_title = clip.get('title', video_id)
                info.append({
                    'id': video_id,
                    'url': video_url,
                    'title': video_title,
                    'uploader': clip.get('channel_name', video_uploader_id),
                    'uploader_id': video_uploader_id,
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'invalid URL: %s' % url)

        api_base = 'http://api.justin.tv'
        paged = False
        if mobj.group('channelid'):
            paged = True
            video_id = mobj.group('channelid')
            api = api_base + '/channel/archives/%s.json' % video_id
        elif mobj.group('chapterid'):
            chapter_id = mobj.group('chapterid')

            webpage = self._download_webpage(url, chapter_id)
            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
            if not m:
                raise ExtractorError(u'Cannot find archive of a chapter')
            archive_id = m.group(1)

            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
            chapter_info_xml = self._download_webpage(api, chapter_id,
                note=u'Downloading chapter information',
                errnote=u'Chapter information download failed')
            doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
            for a in doc.findall('.//archive'):
                if archive_id == a.find('./id').text:
                    break
            else:
                raise ExtractorError(u'Could not find chapter in chapter information')

            video_url = a.find('./video_file_url').text
            video_ext = video_url.rpartition('.')[2] or u'flv'

            chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
            chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
                note='Downloading chapter metadata',
                errnote='Download of chapter metadata failed')
            chapter_info = json.loads(chapter_info_json)

            bracket_start = int(doc.find('.//bracket_start').text)
            bracket_end = int(doc.find('.//bracket_end').text)

            # TODO determine start (and probably fix up file)
            # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
            #video_url += u'?start=' + TODO:start_timestamp
            # bracket_start is 13290, but we want 51670615
            self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
                u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))

            info = {
                'id': u'c' + chapter_id,
                'url': video_url,
                'ext': video_ext,
                'title': chapter_info['title'],
                'thumbnail': chapter_info['preview'],
                'description': chapter_info['description'],
                'uploader': chapter_info['channel']['display_name'],
                'uploader_id': chapter_info['channel']['name'],
            }
            return [info]
        else:
            video_id = mobj.group('videoid')
            api = api_base + '/broadcast/by_archive/%s.json' % video_id

        self.report_extraction(video_id)

        info = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
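        # The archives API is paginated: keep fetching pages of 'limit' entries
        # until a page comes back short (or only a single video was requested)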
        while True:
            if paged:
                self.report_download_page(video_id, offset)
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(page_url, video_id)
            info.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return info
Some files were not shown because too many files have changed in this diff.