From d77c3dfd027e9af4d44fc7109fac0012451268c2 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Sun, 25 Mar 2012 03:07:37 +0200
Subject: [PATCH] Split code as a package, compiled into an executable zip

---
 Makefile                     |    5 +-
 youtube-dl                   |  Bin 165784 -> 41021 bytes
 youtube_dl/FileDownloader.py |  681 ++++++
 youtube_dl/InfoExtractors.py | 3076 ++++++++++++++++++++++++
 youtube_dl/PostProcessing.py |  185 ++
 youtube_dl/Utils.py          |  375 +++
 youtube_dl/__init__.py       | 4288 +---------------------------------
 youtube_dl/__main__.py       |    7 +
 8 files changed, 4333 insertions(+), 4284 deletions(-)
 create mode 100644 youtube_dl/FileDownloader.py
 create mode 100644 youtube_dl/InfoExtractors.py
 create mode 100644 youtube_dl/PostProcessing.py
 create mode 100644 youtube_dl/Utils.py
 mode change 100755 => 100644 youtube_dl/__init__.py
 create mode 100755 youtube_dl/__main__.py

diff --git a/Makefile b/Makefile
index b1a41079a..884aeb326 100644
--- a/Makefile
+++ b/Makefile
@@ -18,6 +18,9 @@ update-readme:
 	echo "$${footer}" >> README.md
 
 compile:
-	cp youtube_dl/__init__.py youtube-dl
+	zip --junk-paths youtube-dl youtube_dl/*.py
+	echo '#!/usr/bin/env python' > youtube-dl
+	cat youtube-dl.zip >> youtube-dl
+	rm youtube-dl.zip
 
 .PHONY: default compile update update-latest update-readme

diff --git a/youtube-dl b/youtube-dl
index 8d0d1cc3381afab236486af52f9712c110cfa311..d088bd9532507833210d3d04f20f7216fbf62b4b 100755
GIT binary patch
literal 41021
[41021 bytes of base85-encoded binary patch data omitted]
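How the compiled artifact works: Python 2.6 and later will execute a zip
archive given on the command line by running the __main__.py stored at the
archive root, and the prepended '#!/usr/bin/env python' line does not corrupt
the archive because zip readers locate the central directory from the end of
the file rather than the start. "zip --junk-paths" strips the youtube_dl/
prefix, so the package files, including the new 7-line __main__.py (whose
contents are not shown in this excerpt), land at the archive root where the
interpreter looks for them. A minimal, self-contained sketch of the same
technique, using hypothetical names (pkg/, hello.py, demo) that are not part
of this patch:

    mkdir -p pkg
    printf 'def main():\n    print("hello")\n' > pkg/hello.py
    printf 'import hello\nhello.main()\n' > pkg/__main__.py
    zip --junk-paths demo pkg/*.py   # writes demo.zip with both files at the root
    echo '#!/usr/bin/env python' > demo
    cat demo.zip >> demo             # shebang line first, zip payload after it
    chmod +x demo
    ./demo                           # Python runs the embedded __main__.py: prints "hello"

Because the paths are junked, modules inside the zip import each other as
top-level names (import hello), which is why flattening youtube_dl/*.py keeps
the archive self-contained.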

Z0Pu=r>b zF#l95V16WQ2sHIJ&T5o)on<&RdM8H?rmXU1Qnpld?AFSTc>ZU#lQCReH(_Uflm8A+ z#n%gx#1@@8C=ANMRtH=`7^|F_VMe^~rqeT~qrHG0JlYOPJ}uf#Kf$FVrr8S(ya#V( z4-8Ze+kXZ#F{BzzGN@KxyQG&?o>q>WmtYl^+Z*fDKgY}EdHeoWhUDGbeEA&j3k=8d zN&>8ixE{h%Gz8!jTr1pCo{*pdQE;bp%kyv7#H>73(?mZTefV((W(HA?t+C5g<)n4h zXLZes(i^UCJoOeXVf}2TQ;$pR(NEgEFFo-ycY1Wb6hCLLzE-+Aib1K#lta)aB^%W| z6DSHSbFi$3h3CpowB_3IN)vk0Ta7u_A#g|$4$u2lea{bpgP7DK__iQlHs7_hh{4^~ z5#k3mpb{u#rZ~1yYkjM~7>uU=ibf0gkT=_Dmt=2r0zteXvu~qKRKZ=Bn2yAH~c{@f=dsa<6w93Z)5AF2(7WfVp5};=mEy;6QUU@N> z_5xss)|N4;rZzDnsQMG3tIHv5UhjuEWgH4wmhQ4X`Hcn)y?!Qw1t&&P$7jRicj-#` zcZoO?x<&x!B+qW+o(C5^BaBXb%Y_Q3VCw+fu^8y_4g@v68WsT^pX5U=nx0ay_|qo+ z)1!_FjG1NP+{H?xJEB`I}{;k?yv`6b&KWcTekdq zL{!)}p*f|LfDg=+Ic?G!hewvU0(h{|Z{=_1{QuOIWYg*Hk>fr&*C`DsHm^Z6g8e!$ z!t0r0<$>L{4u;nK`;ZthiCQo=j$rG1XC$`F;U^l*#ti*Z2lJTa^cuM<3whVp&R&_` zR=C9``y^&B7BUAd0a@Ym>KhPahFIUw=+kVovx%FJMEt9{)xa<}86sm=+Xwqk{SuQz z7kQmuJ(SPe9R$hl+}@GH`?znKL~Ezj{p)lj??w4)i3s_{9nuYe!gn4zGCJ7AY&66! zmomYGybPQ%S)_pOzW+o6nAN4p*YfA!h9u&l#Yd|qU(gMfDuj2QM zI2Oy_7w|){SNN9S#2q*yS3N|FNT$WFCKoxUOl(D=5eNwNHQ)*U3i;&;d9{Q+8S6m3 z!RUD2PAV4I@WR2Cu{ zPmK#|D<3`t@T)#R^Kx7sJUzq&LfF)9Luy zR}|h0f`8djCOy4GPn5kYN#RTSHFIMLFdUi>#t1;&;b6fGs~7$G*}a|nKeTtA;cD*j z^c+`abfS9$-WqNn-hX>6Uw7G){(0~4`%mES)c!;) zQ}RLnvhuC_?N4AxyN~}@$>{2!O^;ojFG~x>n-g5Yj|NdoktDZ?j_OV*h~ijV4ZoG& zDX8K$V2!$~gS)a@4UZzv`8j>8*4E42Lb#Uu{jV*JgInX=OGIQCt4V&XE%60OKghgb zIl$@pAmmh3Qz?1Aw!>MIw(-tbLE4wTTc7+)Lc-Vda43f{*$|MPd%+K1`RzaSJrg;1 zs-(fL4;fvqDjO~MuSHpPp}u*?rkhlU&*iQlf}%GHDCHzVT%M0%(E=jz+*MMGxM%}RFkTMJN!QV(+4YjJyP&vObebN@jQc7F z-`VX(8k<&%Xp8tSx0R_phR{-szB4tx6iil?WFofKjV4QwqJ;P(0`;Kvf1H@P>V%pk zy|@|`iYZ8QoRf&AhC%3UZov7L$Ja+nSQRCL3WLUggr@oDaw$?FL09s%q*7qKUy$rsYCSct}GjCa|u+Iuu0l z_pYk#P>2$h{a0okhD<>j;cZAh;=6=vRtykt-95W~4>!pA76kr=f*YqVq(_2KYoFpP za*lNQ+v7r(ov8U$_)WyRpG?;YudSOxB1Ffzq=l;|mDCnq&^R@YucjkJ0z(uY!tLwT zaE`2rF%J1|U*8O2yJESn zq{6HeVwumS#-W0Z+2HaaLN5Kwwglg0j78gOV`HfdAw~lTLQn~io$)8I8rc>_PSSGa zLsXBT!?^31Bl@SZ5-2Bg^+3Iz6y$me$)zaHut(wov;jmFlB3yR|6g;ZGHdziOk({_ z`Y`B(m0jvshq{4Hk<$Awy+@Dh_ox*A;(OH00ndKK+skO7=CB|go=ypScWkgJJIM4p%7n~>ZdeYzx!+mj{q5MbAQ%PG?0Q3-hgV`wS$P*wDlN1ND;e#L?s)P_P#%z);t`SEyz~lIO-ra|yiTinYF!dMbbN-?X{FF(oJQvH) zN1LC_~{P0L%Rk z#O;zV26SP;KS6FWmKjhSo23{`ik%BeAw89CxCzHYt>NYCQl}+$YI~M>i@YxqvvEVr z(*beWBKofOp^e`pcXg0gvDa%D0(nyp3@8fiX#l<$P`2UigdG38_~C`- zV<>%+FJHY(s~PkEw&KPPIH$R_GM>ZNu{cU7IoA=sr+6>6+|{sm7aRe8tuj zgF%||mG_fYXo1YUZS-S9zo@ClU4rG8e((^j%9X96N@}V>!BV_p$+RU;r9NV|%q@Y1 zzg;I{=mH@eYk!*0_w}97>8uT*08Wt96rd>ZQx?8uay{9_)+sc}v`OxX9r}X-BWB|Z znh5Sw&m@GYk7hSEP@#v!4s*EnP4S3Tj#1&=J)ECS2i@$?NEjjU>~7RVKKZjgTun!l zQI3}>v;K{Zd?A@I@D3$H6*c`8b1q{LyxlcNwhH`UT)d@fJU(MIBcj*~V8cN~Y^7sltZ4-AAHbp7+JKRaFU#Tiec zfyc2J!Yh69(WwcbmsnR%W`UT~_oL|okQ5*zh7SOU!Yz}6r+Yr1UR+)UFY;(MrgZZKBQ-}cicI9SfE0!TN zdUW2$T4I@>!oar|o&hl!i}iVyR0p~PERPg&5-q{U_=7e+aV zZx{rEJ`9lOBTz75#X8fC$@8EvRZ0Qbx#U+tH9RLf7@e32m8ygU;B#(hPHN!}0;Pgk z&HBoUh2%j_n{+mzcfqDMx_|>BQ8gt5XdUo{7+};SlZoK*!6BTd!DvpvKhPAgPfj_B9KIHcyj;-m6fh3-C8C;1^haH10mh5OCV z))aFvIHMDj>9?@~XfZegFz)>Ffd2arrxPqj4v+-K_E)&>-t&NRl6TFX9fNt(z)oR6 z_No-}uw+ovDoz#vIC%#MOfwv39;*QG+rtdp07n;Ck#qWm0 z3pu!VANS951+*|~w~-4`Pywqgj#kjpOP@Rt!Eu4pjh7k@C~#6NsMa&s)0$Qk>V^rS zQurH)`@e9?nU9WjJ1QROFR2cuf~Yq4xU7_ms>frhREMpT6KvEY+`K^WWET6LCV-G< zYC9bi_i!--T=Vvu1_td(UgKh35BTxHt8j5-yS=+FmK}61(fvTcF8M}#nnUoNpFy{L zKUtigW3bB>9wvZ)D~VsArDw<|q;A>_TCy?7;m2u4bE6Rcr$IkNQL2qFTV7MaKdkhyo> zn~53Q01Ru;cWXA!dG@#A7Xu;{`^&D?+9-CfEs$qq-fp1^9z;O<+2}$ZsKjMs>{JTn z&Ph9^IRC)~QS=-Zq*{B7rFyi3Kz zwqaVB4SUmzxyE7uSSc^;lfHt>N~AZgTkxJOeQR~JK&~~>KkG&^B^*MfSMuLc6KRr= z-%msfO8cizzX1#V=&)5w+djc94(&VTVi|>_MVWj9<=2t5Qv5dl!QMc}7e&G*c7*L_ 
z?Rr|3?RJ%SQLD%E5qm)&bOiE8`3NBiecYqA!6}`@g*t{^42N(e+*y8ZBq1l%gF5C9 zonZA>H zx|J+q*1D<%4_$>o1aP)yRcp)K^Je?`CChrU^_|O0}DoR%_(&m)j04f^7e5uOhIi8pWCmT$MDv4cA zXYZh>`@4FSB6*C$da<~ml0F$;ieSYd1qz5L00aLGI6y6>si+dkc@rL%)H4}D>bR91 zO`s1068znBdAEiD+(iZ z2f3ai^|SI*)b7Ilv~(pkJL%)}9_ajrUDrf8bYyb1=qhy#sR)N45HX>OuP98d)UxGo z+{k!cZ~)^BRP)6JZX9%(^^Xzl0_X)xWw~_~T=HjfRw5(XiueM3TSO72fde&_IioMp z1_*4yI_Hsx7(Xq!xZ9!>F_)L%dZlpJerx<2*<%_V|rWSYV;;GR_W|()n>e zD7u4MDrAz*Wi}j7-&ZHYEtUe1igGdzhy1SrRg7C%^D=~^turY>(ek)ruhs`Di``6{4RTAjs!P`SVZz+OOr#;`PpblYKq zMwfU0+#oI-TI|hEls;fbt!7u_nCUfArvBuLs9NIo`1tb~US2!J&~uP&H<9fcFxk zj&IRBW3$&YeD-?ypt+?!mE2OXH|ZR>vi8Zo6ctK~gEuhcwWL=)O5<7-@Nfk!ad9;4 z43K#t2cdN4Q%NJQK#F-9T{$~tKVTT03l)(C8M>~q+7)M{!H52s&8&;HV7FpU>A4u zJxG@09^SHCj6rgORQtiL3d_*s{b+`0N|7@jzWC<#v*)h>9Ca5%fmKgw43`dg?N_=K zAe@f!TH_Z38|Gq$%SV47w%$VYtu^FIIYg|og^X})fFo?H4J@x3!JM)P7b+To($>Yc zi73O{ACwdgCP?qo6L$$<K8;rRvco#nwWE!Q&?hr3=MUC#O!8LRpA1ul_K z;H(EHf1Y}M9MQ03;poYy)b=CNd)Or9Ww}9Gb3`-WPyomZv7}5*L+%O(ctlTj8AgUn z>%kL%@K~-EvC_x`4Y!Zo&;dm9@?_*=VS2z3`tH{4MeZZZFj3s3fal#MJ~B3=4t?1UInqBb&Fz6cBqt%q**w!vP1j_NB&!hM__8hlXKkW z_0)JT{Q${8>wAd+q2q+0ga*=po)^MW_T`QF_V0f8?vjLhli$H<@Sem}R4I6a4QIHx zw*OVyf6aO@X9MB5du)Ae)wT1S=NjZ#OJOB_tm@%g5h@|}X(RT~U*PyQ z9tk{1cU#{)efIPqpdt**y11r84_SNIJ?&<{{|ttiS@z|Z-G3j8*vi)qsM|U@^YvK)IWB zz-cFjfhB9f`aRa~CG{1e!3ahxxLQ!mbs73-HoZh;xZsj?ofz{RI3L(Wa24K!p%?i>hbR!R zMGH{c)6}K&yeMq9mwR9Deb?GH@-l0H+9RZa7y+0>7{m{s zvf&4AI;1|sv6<)eALyv6AX~5rJ+mR56HD-D;c|dPX9_$3=P86Jq_O)bKtUYqc-J8N zn$=O7qS%(!Q&c@T0)eZqQ845XEG0Bi^4O`f2NmMW5=N1V7N`%#&7Cg+A;Pm2hFBKC zeEn+grto-0x3!~#qn^uay$y+Fmo5_Hk8Hj_8+`@_2(Ba`NW~3z{d+6 ztQc>i<4wRzaeuSoK*A)fqK$@vtSd#ZV;00T`jMk+N42p#$e5`VR-C%rAGpq11B-OW z<5@*G{0tlB@r#$w9>fDm#xUPus0{-chL;W8fkDVr8gyaQe`yu!9bVMHq3LBHRPd z-CYcid1Qg%h)u*oABGaj*1UWj?p?c}T)lco7bk*8;~HA#*vzjy740-B`MzI{$j86{ zHj*4k+O~FW3wUJZP}fMDWpaMTwY4f)+fIAfk<92NJX*N!5|FS< zfv;4j7CWVLu>%gk|5ZmJiuzd>{i{{05z5N$T(UO_Zs#1no6A--P>0siT>nD{8442jPiHuL)qcE9uD7{buQ{itR0SEjN;r-E=R)ZQMMFF*M~#h z(mF7>bztXM2?y#)H3PtLw{0uM9QUe~Ld0Dk2(B)cj6j$$wBl$)O_!P!6U~5lz0%p( zM79cqzwo*aK0Z@?2R9sJrq^xe zU$dh=?F{P@Iprf5g4f(_Ry*06#(qo{4?y6a#_H;`gVzEo+NEG&Tg|rh*dgaoU=$y) zpD-*5h&WoDO6Ws!*=igGozEyVJUpd@cUK%DQ+=kg zLQTVi6{|o6TXlamU&eU!qc#Qww^NJM1NTn_f0?SF&&c$6tfzJ~z^7YHU) zw1Cvm&0Yh2x7r$1xg}9WU=Jb-$hzWf!|>WHK7((_H&4hb%gs6w6R{Hn?ywkoJSexcKm8^y06&sx z&})_?pr)~?c4X~0tJH!(M06^CSWcV7;xBd(Y_9kO)D33n!Q3uNjql~0G z>ik|)fT1j=@R;no5-i%u)S4(e`UruT;-t28?Es!U(wWka7I97hE$red0fAKM%K$=> zE|igov<^eEm1ckK!$Lfv9=i-lM0Op53aTrPWvNyq7IIvPk-s*vaCOGw@Kz(E6qiPC zOJd!^$`Ar70pGw<+JV{x!va`}K2yq_jblo_69H7K@ZB!@w8KE%tEfq}>Vl-WLS&Z~ zl?DoZs$VBnOt(%;j1<=0gdK&GGC+qJzE>`+)Mxcry9y!+krp?DkJaN#7Ri>3W;nma zEQ%O@=g<+aL^NC?Vc0E#*DN4FXhy-r#)V)6uOhkG6*?Cv87k8e7dtJJ{6!<|Qu7;? zqrt*BWS2HTEaRJ{tgt2BB#6ZV|Bz}fO9!IT=Z+F+HyQ7NHxf7D&dz_-gPMQAgP^Wgr$eUS5E|rqCr|Spq z%ifuho}7W`N-oI%BwpMKqY@&b1f=ooG^rFQgN0+XISL|58}xarH=Ii;$gkm%fpNV~}iyL1=vrcMBr z^f>%*f%A{G+O7X#xN1wf7>T{7re9=$cn^U1kWz37Z0!u^$2*nEbo)%JIVgPQhd!I^ z>hSg*1c<<&0U=WOP0t-85&5UbvNkIcx8?w)6|mhyD|Jo=l_j^W;x&eVad<##tUUx- zFa`Q{GyWW62Ll=KlM(uGIjr8p!ms@RJ3$h~LZSi>gd`5#N%w+g$OxCVdfSFQf!)FV zLo2TL!3LvneLy2HFO!Y=2kH0{0hTkk+V!*1MPN%1MMNU3^Eq`nIRsU={wfg*1 zOAQ~)H*3A@dp@hZb{s^KxmbnQSZFk^|M+UypBtUQhTM@yi{p30xi|J%v+Z9v?aE%|}he285;xOxap2THOR?&YxKgM9q=L*x*_ z4eyf$;)=ZNJLx7s@CvyN1~8#7243MQ&VN1%E+8cDTFUQN4bO9EWe1%|q&L^-nEl+? 
zKuiw~BH3o=J-%o!RA1d7zOCR;1b0eorJIsNH-j(f43?u2Jw_giO#B~Ad7>_)IXD5u zWb$2-wa0`~KHfE989`^=&`{j9p&pQVxpQT?^0VpRk;0}Q+i({r17T7dGQRPrQO@DT zCpE1latL^sIs2D75ug8;?(@H~0U)~PQ}drUC**4Ead0T#2p;7N4-<$k<#ZHxO@W|+ z*%FCZjQ{{#W!U>y)SA^k z1a1|i@f#bWIK$HJp3uH>L7{f{X5FeVNW%Uvi%2A(yMKnA7+AR(g9w8|m8v^6t;=Kfz5i`ye6kHBRXP{bBuOTwMSpErBZzJ4IF%o^L#E zp>;DU7ndP|_^BLeg^%O@MH?}vM}t0nMiDs8r<8TdH~_5^U31y!@bXk?Vxjqo=#tAl z852ukM06rarJo!e?CZwb4{4`B`T!5R{KJ*Iq9!d+@59v-2mt3e|51;N2@;N79xX=a z^fzR2yU+_S&x??3`&stro6sftbm%Ou-aFGi@jaiC=XarUnoLa<6Nh5^i_bn|9OCb@ zF$^QT<%26cQ4h2aC)3608J>gck7>oF!w+1fW`mrlbJA8(P`HuoI0UR8ioN$O6MlWT z!jXNU(REmX;^exVD!kYbw8aP%iFt#^7Z6hxGLrEqUnBwucRI~|ILtxDu@jan3uuqv z5WUx5ts{vjRLixe!dvZ8U4j)NolDIB!l+;17OxPMgyT*0Fw+< z>Vg1|7X&z|BM=+Ni&iG7LdOe&95#yaDT=G~dWO&gI}F3AV5*~ig|3hjtSVxw^gcW; z6PTv?s-T`Sdh$`RVdB)`MkDj0b|-AceP~k7CJ_QAIEvRL)_Ng>n;#4}5dpI_ZKtF6 zgv`g;@jgu$ZyZOD9iCX;G(8wg0JzgnsA>pF2LYYa_MFZ|#t3+D%e>DXo#}mn7bJ#b zIa^f0Dk1@+@K(%?*1}Q}Y$&G8ThBOuyoX)$#8!nmb@ zs>Bcm^_5H4p}2h9vxq*(b4H8sU(m(@B6~yrsn7*GWtdS;Cst(jxRZhrJaE&s;zVJH zr$DBzGuV2MArw4v`|FH$zI>=5P`aq0BW@t@s$%;@&T?hh*K#yLm<v%zXkyt$tJA()f^t$F0OPk&o$ziks^tNX8TKO*utVjb%Ih)=DZq&F{5?23No z9hy}!=ES)8g4VT||AL7gP$0PymJzR-_S9-wSW!yq-2L(T1-g_7$wi*fmiQyEOLg0{ zs5$>lofLSUWEE2HXE5Rn;Fk|$b&TP%Ti<58eIs#p@9L>#Ah}dwg;o|h0FhX2c2K7aV zAB2PpfnG-DX_-_C;+F5FP#~Wt)8HA9dyTYA`%t;cznkbKpCBrWTL3Qm4FWI0mAj1q zT~(dLtVZ+&lL~$~i?rmu6bcK~aO9RNp-8HnPfzE#7GhREuj0E}jYm=o0Mchn7R;-q zWm3s5#v{bICYKM%wY7bWB~qbsKWx>?<*{m@0&KGIkkt5gWEk!p2823EP|lQc2Ah)>gg@)KXA278RgimK(} zWFVJ!L|?RZ&;^NUfu<7Ok0;PUB`~pQ=5tNXB3z{`u32ZiKZ^#fc5Noza3o05rd?xF zxUE)po+Qn+JKCg*m2Ag+vidPxmx`I^rl+j|LYM|7>Pt!BDdj@_`HD$!PM8ALudGl#oZR9Q#+y4E`SxeG1xaErR|}##+%R%fo=O@bsk_VZT7fcJ zS?hLXm8Si?kDN;~{id{{Q6UY838vj>+Gjsz)g(;7D6Er`_X`PwYzipjjZ3&|F*tb9 z={GO|kB&r9kcq+283PhczkCIE!pE3kwNii(8QXBN2v$oYj@ZTMKhKnQs(z6I1V=E` z_ 99: + return '--:--' + return '%02d:%02d' % (eta_mins, eta_secs) + + @staticmethod + def calc_speed(start, now, bytes): + dif = now - start + if bytes == 0 or dif < 0.001: # One millisecond + return '%10s' % '---b/s' + return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif)) + + @staticmethod + def best_block_size(elapsed_time, bytes): + new_min = max(bytes / 2.0, 1.0) + new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB + if elapsed_time < 0.001: + return long(new_max) + rate = bytes / elapsed_time + if rate > new_max: + return long(new_max) + if rate < new_min: + return long(new_min) + return long(rate) + + @staticmethod + def parse_bytes(bytestr): + """Parse a string indicating a byte quantity into a long integer.""" + matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) + if matchobj is None: + return None + number = float(matchobj.group(1)) + multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) + return long(round(number * multiplier)) + + def add_info_extractor(self, ie): + """Add an InfoExtractor object to the end of the list.""" + self._ies.append(ie) + ie.set_downloader(self) + + def add_post_processor(self, pp): + """Add a PostProcessor object to the end of the chain.""" + self._pps.append(pp) + pp.set_downloader(self) + + def to_screen(self, message, skip_eol=False): + """Print message to stdout if not in quiet mode.""" + assert type(message) == type(u'') + if not self.params.get('quiet', False): + terminator = [u'\n', u''][skip_eol] + output = message + terminator + + if 'b' not in self._screen_file.mode or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr + output = output.encode(preferredencoding(), 
+
+    def add_info_extractor(self, ie):
+        """Add an InfoExtractor object to the end of the list."""
+        self._ies.append(ie)
+        ie.set_downloader(self)
+
+    def add_post_processor(self, pp):
+        """Add a PostProcessor object to the end of the chain."""
+        self._pps.append(pp)
+        pp.set_downloader(self)
+
+    def to_screen(self, message, skip_eol=False):
+        """Print message to stdout if not in quiet mode."""
+        assert type(message) == type(u'')
+        if not self.params.get('quiet', False):
+            terminator = [u'\n', u''][skip_eol]
+            output = message + terminator
+
+            if 'b' not in self._screen_file.mode or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
+                output = output.encode(preferredencoding(), 'ignore')
+            self._screen_file.write(output)
+            self._screen_file.flush()
+
+    def to_stderr(self, message):
+        """Print message to stderr."""
+        print >>sys.stderr, message.encode(preferredencoding())
+
+    def to_cons_title(self, message):
+        """Set console/terminal window title to message."""
+        if not self.params.get('consoletitle', False):
+            return
+        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
+            # c_wchar_p() might not be necessary if `message` is
+            # already of type unicode()
+            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
+        elif 'TERM' in os.environ:
+            sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
+
+    def fixed_template(self):
+        """Checks if the output template is fixed."""
+        return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
+
+    def trouble(self, message=None):
+        """Determine action to take when a download problem appears.
+
+        Depending on whether the downloader has been configured to ignore
+        download errors or not, this method may throw an exception or
+        not when errors are found, after printing the message.
+        """
+        if message is not None:
+            self.to_stderr(message)
+        if not self.params.get('ignoreerrors', False):
+            raise DownloadError(message)
+        self._download_retcode = 1
+
+    def slow_down(self, start_time, byte_counter):
+        """Sleep if the download speed is over the rate limit."""
+        rate_limit = self.params.get('ratelimit', None)
+        if rate_limit is None or byte_counter == 0:
+            return
+        now = time.time()
+        elapsed = now - start_time
+        if elapsed <= 0.0:
+            return
+        speed = float(byte_counter) / elapsed
+        if speed > rate_limit:
+            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
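+
+    # A hand-worked sketch of the sleep above (numbers are hypothetical):
+    # after 2.0 s at byte_counter = 4 MiB with ratelimit = 1 MiB/s, the
+    # download is 2 MiB ahead of schedule, so it sleeps
+    # (4 MiB - 1 MiB/s * 2.0 s) / (1 MiB/s) = 2.0 s to fall back on target.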
+
+    def temp_name(self, filename):
+        """Returns a temporary filename for the given filename."""
+        if self.params.get('nopart', False) or filename == u'-' or \
+                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
+            return filename
+        return filename + u'.part'
+
+    def undo_temp_name(self, filename):
+        if filename.endswith(u'.part'):
+            return filename[:-len(u'.part')]
+        return filename
+
+    def try_rename(self, old_filename, new_filename):
+        try:
+            if old_filename == new_filename:
+                return
+            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
+        except (IOError, OSError), err:
+            self.trouble(u'ERROR: unable to rename file')
+
+    def try_utime(self, filename, last_modified_hdr):
+        """Try to set the last-modified time of the given file."""
+        if last_modified_hdr is None:
+            return
+        if not os.path.isfile(encodeFilename(filename)):
+            return
+        timestr = last_modified_hdr
+        if timestr is None:
+            return
+        filetime = timeconvert(timestr)
+        if filetime is None:
+            return filetime
+        try:
+            os.utime(filename, (time.time(), filetime))
+        except:
+            pass
+        return filetime
+
+    def report_writedescription(self, descfn):
+        """Report that the description file is being written."""
+        self.to_screen(u'[info] Writing video description to: ' + descfn)
+
+    def report_writesubtitles(self, srtfn):
+        """Report that the subtitles file is being written."""
+        self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)
+
+    def report_writeinfojson(self, infofn):
+        """Report that the metadata file is being written."""
+        self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)
+
+    def report_destination(self, filename):
+        """Report destination filename."""
+        self.to_screen(u'[download] Destination: ' + filename)
+
+    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
+        """Report download progress."""
+        if self.params.get('noprogress', False):
+            return
+        self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
+                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
+        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
+                (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
+
+    def report_resuming_byte(self, resume_len):
+        """Report attempt to resume at given byte."""
+        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+
+    def report_retry(self, count, retries):
+        """Report retry in case of HTTP error 5xx."""
+        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+
+    def report_file_already_downloaded(self, file_name):
+        """Report file has already been fully downloaded."""
+        try:
+            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+        except (UnicodeEncodeError), err:
+            self.to_screen(u'[download] The file has already been downloaded')
+
+    def report_unable_to_resume(self):
+        """Report it was impossible to resume download."""
+        self.to_screen(u'[download] Unable to resume')
+
+    def report_finish(self):
+        """Report download finished."""
+        if self.params.get('noprogress', False):
+            self.to_screen(u'[download] Download completed')
+        else:
+            self.to_screen(u'')
+
+    def increment_downloads(self):
+        """Increment the ordinal that assigns a number to each file."""
+        self._num_downloads += 1
+
+    def prepare_filename(self, info_dict):
+        """Generate the output filename."""
+        try:
+            template_dict = dict(info_dict)
+            template_dict['epoch'] = unicode(long(time.time()))
+            template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
+            filename = self.params['outtmpl'] % template_dict
+            return filename
+        except (ValueError, KeyError), err:
+            self.trouble(u'ERROR: invalid system charset or erroneous output template')
+            return None
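+
+    # A sketch of the template expansion above (hypothetical values): with
+    # outtmpl = u'%(title)s-%(id)s.%(ext)s' and an info_dict containing
+    # {'title': u'Demo', 'id': u'abc123', 'ext': u'mp4'}, the %-formatting
+    # against template_dict yields u'Demo-abc123.mp4'; '%(autonumber)s' and
+    # '%(epoch)s' are also available because they are injected just above.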
+
+    def _match_entry(self, info_dict):
+        """Returns None iff the file should be downloaded."""
+
+        title = info_dict['title']
+        matchtitle = self.params.get('matchtitle', False)
+        if matchtitle and not re.search(matchtitle, title, re.IGNORECASE):
+            return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
+        rejecttitle = self.params.get('rejecttitle', False)
+        if rejecttitle and re.search(rejecttitle, title, re.IGNORECASE):
+            return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+        return None
+
+    def process_info(self, info_dict):
+        """Process a single dictionary returned by an InfoExtractor."""
+
+        reason = self._match_entry(info_dict)
+        if reason is not None:
+            self.to_screen(u'[download] ' + reason)
+            return
+
+        max_downloads = self.params.get('max_downloads')
+        if max_downloads is not None:
+            if self._num_downloads > int(max_downloads):
+                raise MaxDownloadsReached()
+
+        filename = self.prepare_filename(info_dict)
+
+        # Forced printings
+        if self.params.get('forcetitle', False):
+            print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
+        if self.params.get('forceurl', False):
+            print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
+        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
+            print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
+        if self.params.get('forcedescription', False) and 'description' in info_dict:
+            print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
+        if self.params.get('forcefilename', False) and filename is not None:
+            print filename.encode(preferredencoding(), 'xmlcharrefreplace')
+        if self.params.get('forceformat', False):
+            print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace')
+
+        # Do nothing else if in simulate mode
+        if self.params.get('simulate', False):
+            return
+
+        if filename is None:
+            return
+
+        try:
+            dn = os.path.dirname(encodeFilename(filename))
+            if dn != '' and not os.path.exists(dn): # dn is already encoded
+                os.makedirs(dn)
+        except (OSError, IOError), err:
+            self.trouble(u'ERROR: unable to create directory ' + unicode(err))
+            return
+
+        if self.params.get('writedescription', False):
+            try:
+                descfn = filename + u'.description'
+                self.report_writedescription(descfn)
+                descfile = open(encodeFilename(descfn), 'wb')
+                try:
+                    descfile.write(info_dict['description'].encode('utf-8'))
+                finally:
+                    descfile.close()
+            except (OSError, IOError):
+                self.trouble(u'ERROR: Cannot write description file ' + descfn)
+                return
+
+        if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
+            # subtitles download errors are already managed as troubles in relevant IE
+            # that way it will silently go on when used with unsupporting IE
+            try:
+                srtfn = filename.rsplit('.', 1)[0] + u'.srt'
+                self.report_writesubtitles(srtfn)
+                srtfile = open(encodeFilename(srtfn), 'wb')
+                try:
+                    srtfile.write(info_dict['subtitles'].encode('utf-8'))
+                finally:
+                    srtfile.close()
+            except (OSError, IOError):
+                self.trouble(u'ERROR: Cannot write subtitles file ' + srtfn)
+                return
+
+        if self.params.get('writeinfojson', False):
+            infofn = filename + u'.info.json'
+            self.report_writeinfojson(infofn)
+            try:
+                json.dump
+            except (NameError, AttributeError):
+                self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, set up a json module, or leave out --write-info-json.')
+                return
+            try:
+                infof = open(encodeFilename(infofn), 'wb')
+                try:
+                    json_info_dict = dict((k, v) for k, v in info_dict.iteritems() if not k in ('urlhandle',))
+                    json.dump(json_info_dict, infof)
+                finally:
+                    infof.close()
+            except (OSError, IOError):
+                self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
+                return
+
+        if not self.params.get('skip_download', False):
+            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
+                success = True
+            else:
+                try:
+                    success = self._do_download(filename, info_dict)
+                except (OSError, IOError), err:
+                    raise UnavailableVideoError
+                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                    self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+                    return
+                except (ContentTooShortError, ), err:
+                    self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                    return
+
+            if success:
+                try:
+                    self.post_process(filename, info_dict)
+                except (PostProcessingError), err:
+                    self.trouble(u'ERROR: postprocessing: %s' % str(err))
+                    return
+
+    def download(self, url_list):
+        """Download a given list of URLs."""
+        if len(url_list) > 1 and self.fixed_template():
+            raise SameFileError(self.params['outtmpl'])
+
+        for url in url_list:
+            suitable_found = False
+            for ie in self._ies:
+                # Go to next InfoExtractor if not suitable
+                if not ie.suitable(url):
+                    continue
+
+                # Suitable InfoExtractor found
+                suitable_found = True
+
+                # Extract information from URL and process it
+                ie.extract(url)
+
+                # Suitable InfoExtractor had been found; go to next URL
+                break
+
+            if not suitable_found:
+                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+
+        return self._download_retcode
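+
+    # Minimal usage sketch (hypothetical caller, assuming the params-dict
+    # constructor used throughout this class and the wiring that
+    # youtube_dl/__init__.py performs): build a FileDownloader, register
+    # extractors, then hand it URLs.
+    #
+    #     fd = FileDownloader({'outtmpl': u'%(id)s.%(ext)s', 'quiet': False})
+    #     fd.add_info_extractor(YoutubeIE())
+    #     retcode = fd.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])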
+
+    def post_process(self, filename, ie_info):
+        """Run the postprocessing chain on the given file."""
+        info = dict(ie_info)
+        info['filepath'] = filename
+        for pp in self._pps:
+            info = pp.run(info)
+            if info is None:
+                break
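+
+    # Sketch of the chain contract above: each PostProcessor.run() receives
+    # the info dict (with 'filepath' set) and returns it, possibly modified,
+    # for the next processor; returning None stops the chain. For example, a
+    # hypothetical processor that renames the file would update
+    # info['filepath'] before returning info.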
+
+    def _download_with_rtmpdump(self, filename, url, player_url):
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        # Check for rtmpdump first
+        try:
+            subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+        except (OSError, IOError):
+            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+            return False
+
+        # Download using rtmpdump. rtmpdump returns exit code 2 when
+        # the connection was interrupted and resuming appears to be
+        # possible. This is part of rtmpdump's normal usage, AFAIK.
+        basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
+        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
+        if self.params.get('verbose', False):
+            try:
+                import pipes
+                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
+            except ImportError:
+                shell_quote = repr
+            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
+        retval = subprocess.call(args)
+        while retval == 2 or retval == 1:
+            prevsize = os.path.getsize(encodeFilename(tmpfilename))
+            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
+            time.sleep(5.0) # This seems to be needed
+            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
+            cursize = os.path.getsize(encodeFilename(tmpfilename))
+            if prevsize == cursize and retval == 1:
+                break
+            # Some rtmp streams seem to abort after ~99.8%. Don't complain for those
+            if prevsize == cursize and retval == 2 and cursize > 1024:
+                self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
+                retval = 0
+                break
+        if retval == 0:
+            self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
+            self.try_rename(tmpfilename, filename)
+            return True
+        else:
+            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
+            return False
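+
+    # Sketch of the command the branch above assembles (hypothetical values):
+    # with player_url set and continuedl enabled, args becomes
+    #
+    #     rtmpdump -q -W http://host/player.swf -r rtmp://host/stream \
+    #         -o video.flv.part -e -k 1
+    #
+    # i.e. quiet mode, SWF verification against the player URL, and resume
+    # (-e) while skipping one keyframe (-k 1) when locating the resume point.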
+
+    def _do_download(self, filename, info_dict):
+        url = info_dict['url']
+        player_url = info_dict.get('player_url', None)
+
+        # Check file already present
+        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
+            self.report_file_already_downloaded(filename)
+            return True
+
+        # Attempt to download using rtmpdump
+        if url.startswith('rtmp'):
+            return self._download_with_rtmpdump(filename, url, player_url)
+
+        tmpfilename = self.temp_name(filename)
+        stream = None
+
+        # Do not include the Accept-Encoding header
+        headers = {'Youtubedl-no-compression': 'True'}
+        basic_request = urllib2.Request(url, None, headers)
+        request = urllib2.Request(url, None, headers)
+
+        # Establish possible resume length
+        if os.path.isfile(encodeFilename(tmpfilename)):
+            resume_len = os.path.getsize(encodeFilename(tmpfilename))
+        else:
+            resume_len = 0
+
+        open_mode = 'wb'
+        if resume_len != 0:
+            if self.params.get('continuedl', False):
+                self.report_resuming_byte(resume_len)
+                request.add_header('Range', 'bytes=%d-' % resume_len)
+                open_mode = 'ab'
+            else:
+                resume_len = 0
+
+        count = 0
+        retries = self.params.get('retries', 0)
+        while count <= retries:
+            # Establish connection
+            try:
+                if count == 0 and 'urlhandle' in info_dict:
+                    data = info_dict['urlhandle']
+                data = urllib2.urlopen(request)
+                break
+            except (urllib2.HTTPError, ), err:
+                if (err.code < 500 or err.code >= 600) and err.code != 416:
+                    # Unexpected HTTP error
+                    raise
+                elif err.code == 416:
+                    # Unable to resume (requested range not satisfiable)
+                    try:
+                        # Open the connection again without the range header
+                        data = urllib2.urlopen(basic_request)
+                        content_length = data.info()['Content-Length']
+                    except (urllib2.HTTPError, ), err:
+                        if err.code < 500 or err.code >= 600:
+                            raise
+                    else:
+                        # Examine the reported length
+                        if (content_length is not None and
+                                (resume_len - 100 < long(content_length) < resume_len + 100)):
+                            # The file had already been fully downloaded.
+                            # Explanation of the above condition: in issue #175 it was revealed that
+                            # YouTube sometimes adds or removes a few bytes from the end of the file,
+                            # changing the file size slightly and causing problems for some users. So
+                            # I decided to implement a suggested change and consider the file
+                            # completely downloaded if the file size differs less than 100 bytes from
+                            # the one in the hard drive.
+                            self.report_file_already_downloaded(filename)
+                            self.try_rename(tmpfilename, filename)
+                            return True
+                        else:
+                            # The length does not match, we start the download over
+                            self.report_unable_to_resume()
+                            open_mode = 'wb'
+                            break
+            # Retry
+            count += 1
+            if count <= retries:
+                self.report_retry(count, retries)
+
+        if count > retries:
+            self.trouble(u'ERROR: giving up after %s retries' % retries)
+            return False
+
+        data_len = data.info().get('Content-length', None)
+        if data_len is not None:
+            data_len = long(data_len) + resume_len
+        data_len_str = self.format_bytes(data_len)
+        byte_counter = 0 + resume_len
+        block_size = 1024
+        start = time.time()
+        while True:
+            # Download and write
+            before = time.time()
+            data_block = data.read(block_size)
+            after = time.time()
+            if len(data_block) == 0:
+                break
+            byte_counter += len(data_block)
+
+            # Open file just in time
+            if stream is None:
+                try:
+                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
+                    assert stream is not None
+                    filename = self.undo_temp_name(tmpfilename)
+                    self.report_destination(filename)
+                except (OSError, IOError), err:
+                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
+                    return False
+            try:
+                stream.write(data_block)
+            except (IOError, OSError), err:
+                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+                return False
+            block_size = self.best_block_size(after - before, len(data_block))
+
+            # Progress message
+            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
+            if data_len is None:
+                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
+            else:
+                percent_str = self.calc_percent(byte_counter, data_len)
+                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
+                self.report_progress(percent_str, data_len_str, speed_str, eta_str)
+
+            # Apply rate limit
+            self.slow_down(start, byte_counter - resume_len)
+
+        if stream is None:
+            self.trouble(u'\nERROR: Did not get any data blocks')
+            return False
+        stream.close()
+        self.report_finish()
+        if data_len is not None and byte_counter != data_len:
+            raise ContentTooShortError(byte_counter, long(data_len))
+        self.try_rename(tmpfilename, filename)
+
+        # Update file modification time
+        if self.params.get('updatetime', True):
+            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
+
+        return True
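+
+    # Worked example of the resume handshake above (hypothetical sizes): with
+    # a 1000-byte .part file and continuedl set, the request carries
+    # 'Range: bytes=1000-'. If the server answers 416, the plain request is
+    # retried and its Content-Length is compared against resume_len; anything
+    # within +/- 100 bytes (the issue #175 tolerance) is treated as already
+    # fully downloaded, otherwise the download restarts from byte zero.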
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
new file mode 100644
index 000000000..c9c563599
--- /dev/null
+++ b/youtube_dl/InfoExtractors.py
@@ -0,0 +1,3076 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import datetime
+import HTMLParser
+import httplib
+import netrc
+import os
+import re
+import socket
+import time
+import urllib
+import urllib2
+import warnings
+import email.utils
+
+try:
+    import cStringIO as StringIO
+except ImportError:
+    import StringIO
+
+# parse_qs was moved from the cgi module to the urlparse module recently.
+try:
+    from urlparse import parse_qs
+except ImportError:
+    from cgi import parse_qs
+
+try:
+    import lxml.etree
+except ImportError:
+    pass # Handled below
+
+try:
+    import xml.etree.ElementTree
+except ImportError: # Python<2.5: Not officially supported, but let it slip
+    warnings.warn('xml.etree.ElementTree support is missing. Consider upgrading to Python >= 2.5 if you get related errors.')
+
+from Utils import *
+
+
+class InfoExtractor(object):
+    """Information Extractor class.
+
+    Information extractors are the classes that, given a URL, extract
+    information from the video (or videos) the URL refers to. This
+    information includes the real video URL, the video title and simplified
+    title, author and others. The information is stored in a dictionary
+    which is then passed to the FileDownloader. The FileDownloader
+    processes this information, possibly downloading the video to the file
+    system, among other possible outcomes. The dictionaries must include
+    the following fields:
+
+    id:         Video identifier.
+    url:        Final video URL.
+    uploader:   Nickname of the video uploader.
+    title:      Literal title.
+    stitle:     Simplified title.
+    ext:        Video filename extension.
+    format:     Video format.
+    player_url: SWF Player URL (may be None).
+
+    The following fields are optional. Their primary purpose is to allow
+    youtube-dl to serve as the backend for a video search function, such
+    as the one in youtube2mp3. They are only used when their respective
+    forced printing functions are called:
+
+    thumbnail:   Full URL to a video thumbnail image.
+    description: One-line video description.
+
+    Subclasses of this one should re-define the _real_initialize() and
+    _real_extract() methods and define a _VALID_URL regexp.
+    Probably, they should also be added to the list of extractors.
+    """
+
+    _ready = False
+    _downloader = None
+
+    def __init__(self, downloader=None):
+        """Constructor. Receives an optional downloader."""
+        self._ready = False
+        self.set_downloader(downloader)
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url) is not None
+
+    def initialize(self):
+        """Initializes an instance (authentication, etc)."""
+        if not self._ready:
+            self._real_initialize()
+            self._ready = True
+
+    def extract(self, url):
+        """Extracts URL information and returns it in list of dicts."""
+        self.initialize()
+        return self._real_extract(url)
+
+    def set_downloader(self, downloader):
+        """Sets the downloader for this IE."""
+        self._downloader = downloader
+
+    def _real_initialize(self):
+        """Real initialization process. Redefine in subclasses."""
+        pass
+
+    def _real_extract(self, url):
+        """Real extraction process. Redefine in subclasses."""
+        pass
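+
+# A minimal subclass sketch (hypothetical site, not part of the extractor
+# list in this patch): define _VALID_URL, then have _real_extract() feed one
+# dict per video to the downloader.
+#
+#     class ExampleIE(InfoExtractor):
+#         _VALID_URL = r'(?:http://)?(?:www\.)?example\.com/video/(\w+)'
+#
+#         def _real_extract(self, url):
+#             video_id = re.match(self._VALID_URL, url).group(1)
+#             self._downloader.increment_downloads()
+#             self._downloader.process_info({
+#                 'id': video_id, 'url': 'http://example.com/%s.mp4' % video_id,
+#                 'uploader': u'NA', 'upload_date': u'NA', 'title': video_id,
+#                 'stitle': video_id, 'ext': u'mp4', 'format': u'NA',
+#                 'player_url': None,
+#             })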
Redefine in subclasses.""" + pass + + +class YoutubeIE(InfoExtractor): + """Information extractor for youtube.com.""" + + _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$' + _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' + _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en' + _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' + _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' + _NETRC_MACHINE = 'youtube' + # Listed in order of quality + _available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13'] + _available_formats_prefer_free = ['38', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13'] + _video_extensions = { + '13': '3gp', + '17': 'mp4', + '18': 'mp4', + '22': 'mp4', + '37': 'mp4', + '38': 'video', # You actually don't know if this will be MOV, AVI or whatever + '43': 'webm', + '44': 'webm', + '45': 'webm', + } + _video_dimensions = { + '5': '240x400', + '6': '???', + '13': '???', + '17': '144x176', + '18': '360x640', + '22': '720x1280', + '34': '360x640', + '35': '480x854', + '37': '1080x1920', + '38': '3072x4096', + '43': '360x640', + '44': '480x854', + '45': '720x1280', + } + IE_NAME = u'youtube' + + def report_lang(self): + """Report attempt to set language.""" + self._downloader.to_screen(u'[youtube] Setting language') + + def report_login(self): + """Report attempt to log in.""" + self._downloader.to_screen(u'[youtube] Logging in') + + def report_age_confirmation(self): + """Report attempt to confirm age.""" + self._downloader.to_screen(u'[youtube] Confirming age') + + def report_video_webpage_download(self, video_id): + """Report attempt to download video webpage.""" + self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id) + + def report_video_info_webpage_download(self, video_id): + """Report attempt to download video info webpage.""" + self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id) + + def report_video_subtitles_download(self, video_id): + """Report attempt to download video info webpage.""" + self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id) + + def report_information_extraction(self, video_id): + """Report attempt to extract video information.""" + self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id) + + def report_unavailable_format(self, video_id, format): + """Report extracted video URL.""" + self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format)) + + def report_rtmp_download(self): + """Indicate the download will use the RTMP protocol.""" + self._downloader.to_screen(u'[youtube] RTMP download detected') + + def _closed_captions_xml_to_srt(self, xml_string): + srt = '' + texts = re.findall(r'([^<]+)', xml_string, re.MULTILINE) + # TODO parse xml instead of regex + for n, (start, dur_tag, dur, caption) in enumerate(texts): + if not dur: dur = '4' + start = float(start) + end = start + float(dur) + start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000) + end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000) + caption = re.sub(ur'(?u)&(.+?);', htmlentity_transform, caption) + caption = re.sub(ur'(?u)&(.+?);', htmlentity_transform, caption) # double cycle, 
+
+    def _print_formats(self, formats):
+        print 'Available formats:'
+        for x in formats:
+            print '%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        username = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            username = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    username = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError), err:
+                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+                return
+
+        # Set language
+        request = urllib2.Request(self._LANG_URL)
+        try:
+            self.report_lang()
+            urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
+            return
+
+        # No authentication to be performed
+        if username is None:
+            return
+
+        # Log in
+        login_form = {
+            'current_form': 'loginForm',
+            'next': '/',
+            'action_login': 'Log In',
+            'username': username,
+            'password': password,
+        }
+        request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
+        try:
+            self.report_login()
+            login_results = urllib2.urlopen(request).read()
+            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
+                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
+                return
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+            return
+
+        # Confirm age
+        age_form = {
+            'next_url': '/',
+            'action_confirm': 'Confirm',
+        }
+        request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
+        try:
+            self.report_age_confirmation()
+            age_results = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
+            return
+
+    def _real_extract(self, url):
+        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
+        mobj = re.search(self._NEXT_URL_RE, url)
+        if mobj:
+            url = 'http://www.youtube.com/' + urllib.unquote(mobj.group(1)).lstrip('/')
+
+        # Extract video id from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group(2)
+
+        # Get video webpage
+        self.report_video_webpage_download(video_id)
+        request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
+        try:
+            video_webpage = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+            return
+
+        # Attempt to extract SWF player URL
+        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
+        if mobj is not None:
+            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
+        else:
+            player_url = None
+
+        # Get video info
+        self.report_video_info_webpage_download(video_id)
+        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                    % (video_id, el_type))
+            request = urllib2.Request(video_info_url)
+            try:
+                video_info_webpage = urllib2.urlopen(request).read()
+                video_info = parse_qs(video_info_webpage)
+                if 'token' in video_info:
+                    break
+            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
+                return
+        if 'token' not in video_info:
+            if 'reason' in video_info:
+                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
+            else:
+                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+            return
+
+        # Start extracting information
+        self.report_information_extraction(video_id)
+
+        # uploader
+        if 'author' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            return
+        video_uploader = urllib.unquote_plus(video_info['author'][0])
+
+        # title
+        if 'title' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = urllib.unquote_plus(video_info['title'][0])
+        video_title = video_title.decode('utf-8')
+        video_title = sanitize_title(video_title)
+
+        # simplified title
+        simple_title = simplify_title(video_title)
+
+        # thumbnail image
+        if 'thumbnail_url' not in video_info:
+            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+            video_thumbnail = ''
+        else: # don't panic if we can't find it
+            video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
+
+        # upload date
+        upload_date = u'NA'
+        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
+        if mobj is not None:
+            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
+            format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
+            for expression in format_expressions:
+                try:
+                    upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
+                except:
+                    pass
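+
+        # Example of the date normalization above: an on-page date like
+        # 'Mar 25, 2012' is first squeezed to 'Mar 25 2012', then the
+        # '%b %d %Y' expression converts it to '20120325'; expressions
+        # that do not match simply fall through via the bare except.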
+
+        # description
+        try:
+            lxml.etree
+        except NameError:
+            video_description = u'No description available.'
+            mobj = re.search(r'<meta name="description" content="(.*?)">', video_webpage)
+            if mobj is not None:
+                video_description = mobj.group(1).decode('utf-8')
+        else:
+            html_parser = lxml.etree.HTMLParser(encoding='utf-8')
+            vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
+            video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()'))
+            # TODO use another parser
+
+        # closed captions
+        video_subtitles = None
+        if self._downloader.params.get('writesubtitles', False):
+            self.report_video_subtitles_download(video_id)
+            request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+            try:
+                srt_list = urllib2.urlopen(request).read()
+            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+            else:
+                srt_lang_list = re.findall(r'lang_code="([\w\-]+)"', srt_list)
+                if srt_lang_list:
+                    if self._downloader.params.get('subtitleslang', False):
+                        srt_lang = self._downloader.params.get('subtitleslang')
+                    elif 'en' in srt_lang_list:
+                        srt_lang = 'en'
+                    else:
+                        srt_lang = srt_lang_list[0]
+                    if not srt_lang in srt_lang_list:
+                        self._downloader.trouble(u'WARNING: no closed captions found in the specified language')
+                    else:
+                        request = urllib2.Request('http://video.google.com/timedtext?hl=en&lang=%s&v=%s' % (srt_lang, video_id))
+                        try:
+                            srt_xml = urllib2.urlopen(request).read()
+                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                            self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+                        else:
+                            video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
+                else:
+                    self._downloader.trouble(u'WARNING: video has no closed captions')
+
+        # token
+        video_token = urllib.unquote_plus(video_info['token'][0])
+
+        # Decide which formats to download
+        req_format = self._downloader.params.get('format', None)
+
+        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+            self.report_rtmp_download()
+            video_url_list = [(None, video_info['conn'][0])]
+        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
+            url_data = [parse_qs(uds) for uds in url_data_strs]
+            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+            url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
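+
+            # Sketch of the structure built above (hypothetical payload): each
+            # comma-separated entry is itself querystring-encoded, e.g.
+            # 'itag=22&url=http%3A%2F%2F...&quality=hd720', so url_map ends up
+            # mapping itags to direct URLs: {'22': 'http://...', '34': ...}.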
+
+            format_limit = self._downloader.params.get('format_limit', None)
+            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
+            if format_limit is not None and format_limit in available_formats:
+                format_list = available_formats[available_formats.index(format_limit):]
+            else:
+                format_list = available_formats
+            existing_formats = [x for x in format_list if x in url_map]
+            if len(existing_formats) == 0:
+                self._downloader.trouble(u'ERROR: no known formats available for video')
+                return
+            if self._downloader.params.get('listformats', None):
+                self._print_formats(existing_formats)
+                return
+            if req_format is None or req_format == 'best':
+                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+            elif req_format == 'worst':
+                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
+            elif req_format in ('-1', 'all'):
+                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+            else:
+                # Specific formats. We pick the first in a slash-delimited sequence.
+                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+                req_formats = req_format.split('/')
+                video_url_list = None
+                for rf in req_formats:
+                    if rf in url_map:
+                        video_url_list = [(rf, url_map[rf])]
+                        break
+                if video_url_list is None:
+                    self._downloader.trouble(u'ERROR: requested format not available')
+                    return
+        else:
+            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+            return
+
+        for format_param, video_real_url in video_url_list:
+            # At this point we have a new video
+            self._downloader.increment_downloads()
+
+            # Extension
+            video_extension = self._video_extensions.get(format_param, 'flv')
+
+            try:
+                # Process video information
+                self._downloader.process_info({
+                    'id': video_id.decode('utf-8'),
+                    'url': video_real_url.decode('utf-8'),
+                    'uploader': video_uploader.decode('utf-8'),
+                    'upload_date': upload_date,
+                    'title': video_title,
+                    'stitle': simple_title,
+                    'ext': video_extension.decode('utf-8'),
+                    'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
+                    'thumbnail': video_thumbnail.decode('utf-8'),
+                    'description': video_description,
+                    'player_url': player_url,
+                    'subtitles': video_subtitles
+                })
+            except UnavailableVideoError, err:
+                self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class MetacafeIE(InfoExtractor):
+    """Information Extractor for metacafe.com."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
+    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
+    _youtube_ie = None
+    IE_NAME = u'metacafe'
+
+    def __init__(self, youtube_ie, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+        self._youtube_ie = youtube_ie
+
+    def report_disclaimer(self):
+        """Report disclaimer retrieval."""
+        self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
+
+    def report_age_confirmation(self):
+        """Report attempt to confirm age."""
+        self._downloader.to_screen(u'[metacafe] Confirming age')
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
+
+    def _real_initialize(self):
+        # Retrieve disclaimer
+        request = urllib2.Request(self._DISCLAIMER)
+        try:
+            self.report_disclaimer()
+            disclaimer = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
+            return
+
+        # Confirm age
+        disclaimer_form = {
+            'filters': '0',
+            'submit': "Continue - I'm over 18",
+        }
+        request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
+        try:
+            self.report_age_confirmation()
+            disclaimer = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
+            return
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        # Check if video comes from YouTube
+        mobj2 = re.match(r'^yt-(.*)$', video_id)
+        if mobj2 is not None:
+            self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
+            return
+
+        # At this point we have a new video
+        self._downloader.increment_downloads()
+
+        simple_title = mobj.group(2).decode('utf-8')
+
+        # Retrieve video webpage to extract further information
+        request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
+            return
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+        if mobj is not None:
+            mediaURL = urllib.unquote(mobj.group(1))
+            video_extension = mediaURL[-3:]
+
+            # Extract gdaKey if available
+            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+            if mobj is None:
+                video_url = mediaURL
+            else:
+                gdaKey = mobj.group(1)
+                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+        else:
+            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            vardict = parse_qs(mobj.group(1))
+            if 'mediaData' not in vardict:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            mediaURL = mobj.group(1).replace('\\/', '/')
+            video_extension = mediaURL[-3:]
+            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
+
+        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+        video_title = sanitize_title(video_title)
+
+        mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            return
+        video_uploader = mobj.group(1)
+
+        try:
+            # Process video information
+            self._downloader.process_info({
+                'id': video_id.decode('utf-8'),
+                'url': video_url.decode('utf-8'),
+                'uploader': video_uploader.decode('utf-8'),
+                'upload_date': u'NA',
+                'title': video_title,
+                'stitle': simple_title,
+                'ext': video_extension.decode('utf-8'),
+                'format': u'NA',
+                'player_url': None,
+            })
+        except UnavailableVideoError:
+            self._downloader.trouble(u'\nERROR: unable to download video')
+ video_extension = 'flv'
+
+ # Retrieve video webpage to extract further information
+ request = urllib2.Request(url)
+ request.add_header('Cookie', 'family_filter=off')
+ try:
+ self.report_download_webpage(video_id)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
+ return
+
+ # Extract URL, uploader and title from webpage
+ self.report_extraction(video_id)
+ mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract media URL')
+ return
+ sequence = urllib.unquote(mobj.group(1))
+ mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract media URL')
+ return
+ mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
+
+ # if needed add http://www.dailymotion.com/ if relative URL
+
+ video_url = mediaURL
+
+ mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ video_title = unescapeHTML(mobj.group('title').decode('utf-8'))
+ video_title = sanitize_title(video_title)
+ simple_title = simplify_title(video_title)
+
+ mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+ return
+ video_uploader = mobj.group(1)
+
+ try:
+ # Process video information
+ self._downloader.process_info({
+ 'id': video_id.decode('utf-8'),
+ 'url': video_url.decode('utf-8'),
+ 'uploader': video_uploader.decode('utf-8'),
+ 'upload_date': u'NA',
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': video_extension.decode('utf-8'),
+ 'format': u'NA',
+ 'player_url': None,
+ })
+ except UnavailableVideoError:
+ self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class GoogleIE(InfoExtractor):
+ """Information extractor for video.google.com."""
+
+ _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
+ IE_NAME = u'video.google'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, video_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
+
+ def report_extraction(self, video_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
+
+ def _real_extract(self, url):
+ # Extract id from URL
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ return
+
+ # At this point we have a new video
+ self._downloader.increment_downloads()
+ video_id = mobj.group(1)
+
+ video_extension = 'mp4'
+
+ # Retrieve video webpage to extract further information
+ request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
+ try:
+ self.report_download_webpage(video_id)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ # Extract URL, uploader, and title from webpage
+ self.report_extraction(video_id)
+ mobj = re.search(r"download_url:'([^']+)'", webpage)
+ if mobj is None:
+ video_extension = 'flv'
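+ # No mp4 download_url was found, so fall back to the flv URL that
+ # Google embeds in JavaScript with hex escapes: '\x3d' stands for '='
+ # and '\x26' for '&'. The two replace() calls below undo exactly those
+ # two escapes; a more general Python 2 way to decode all such escapes
+ # would be, for instance (illustrative value, not from this patch):
+ #   'v\\x3d1\\x26t\\x3d2'.decode('string_escape')  # -> 'v=1&t=2'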
+ mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract media URL')
+ return
+ mediaURL = urllib.unquote(mobj.group(1))
+ mediaURL = mediaURL.replace('\\x3d', '\x3d')
+ mediaURL = mediaURL.replace('\\x26', '\x26')
+
+ video_url = mediaURL
+
+ mobj = re.search(r'<title>(.*)</title>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ video_title = mobj.group(1).decode('utf-8')
+ video_title = sanitize_title(video_title)
+ simple_title = simplify_title(video_title)
+
+ # Extract video description
+ mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video description')
+ return
+ video_description = mobj.group(1).decode('utf-8')
+ if not video_description:
+ video_description = 'No description available.'
+
+ # Extract video thumbnail
+ if self._downloader.params.get('forcethumbnail', False):
+ request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
+ try:
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+ mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ return
+ video_thumbnail = mobj.group(1)
+ else: # we need something to pass to process_info
+ video_thumbnail = ''
+
+ try:
+ # Process video information
+ self._downloader.process_info({
+ 'id': video_id.decode('utf-8'),
+ 'url': video_url.decode('utf-8'),
+ 'uploader': u'NA',
+ 'upload_date': u'NA',
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': video_extension.decode('utf-8'),
+ 'format': u'NA',
+ 'player_url': None,
+ })
+ except UnavailableVideoError:
+ self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class PhotobucketIE(InfoExtractor):
+ """Information extractor for photobucket.com."""
+
+ _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+ IE_NAME = u'photobucket'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, video_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
+
+ def report_extraction(self, video_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
+
+ def _real_extract(self, url):
+ # Extract id from URL
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ return
+
+ # At this point we have a new video
+ self._downloader.increment_downloads()
+ video_id = mobj.group(1)
+
+ video_extension = 'flv'
+
+ # Retrieve video webpage to extract further information
+ request = urllib2.Request(url)
+ try:
+ self.report_download_webpage(video_id)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ # Extract URL, uploader, and title from webpage
+ self.report_extraction(video_id)
+ mobj = re.search(r'<link rel="video_src" href=".*\?file=(.+?)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract media URL')
+ return
+ mediaURL = urllib.unquote(mobj.group(1))
+
+ video_url = mediaURL
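+ # Photobucket puts both the clip name and the uploader into the page
+ # <title> ('NAME video by USER - Photobucket'), so the single regex
+ # below captures them in one pass. Illustration with made-up values:
+ #   >>> re.search(r'<title>(.*) video by (.*) - Photobucket</title>',
+ #   ...     '<title>clip video by alice - Photobucket</title>').groups()
+ #   ('clip', 'alice')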
+
+ mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ video_title = mobj.group(1).decode('utf-8')
+ video_title = sanitize_title(video_title)
+ simple_title = simplify_title(video_title)
+
+ video_uploader = mobj.group(2).decode('utf-8')
+
+ try:
+ # Process video information
+ self._downloader.process_info({
+ 'id': video_id.decode('utf-8'),
+ 'url': video_url.decode('utf-8'),
+ 'uploader': video_uploader,
+ 'upload_date': u'NA',
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': video_extension.decode('utf-8'),
+ 'format': u'NA',
+ 'player_url': None,
+ })
+ except UnavailableVideoError:
+ self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class YahooIE(InfoExtractor):
+ """Information extractor for video.yahoo.com."""
+
+ # _VALID_URL matches all Yahoo! Video URLs
+ # _VPAGE_URL matches only the extractable '/watch/' URLs
+ _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
+ _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
+ IE_NAME = u'video.yahoo'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, video_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
+
+ def report_extraction(self, video_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
+
+ def _real_extract(self, url, new_video=True):
+ # Extract ID from URL
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ return
+
+ # At this point we have a new video
+ self._downloader.increment_downloads()
+ video_id = mobj.group(2)
+ video_extension = 'flv'
+
+ # Rewrite valid but non-extractable URLs as
+ # extractable English language /watch/ URLs
+ if re.match(self._VPAGE_URL, url) is None:
+ request = urllib2.Request(url)
+ try:
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Unable to extract id field')
+ return
+ yahoo_id = mobj.group(1)
+
+ mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Unable to extract vid field')
+ return
+ yahoo_vid = mobj.group(1)
+
+ url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
+ return self._real_extract(url, new_video=False)
+
+ # Retrieve video webpage to extract further information
+ request = urllib2.Request(url)
+ try:
+ self.report_download_webpage(video_id)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ # Extract uploader and title from webpage
+ self.report_extraction(video_id)
+ mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video title')
+ return
+ video_title = mobj.group(1).decode('utf-8')
+ simple_title = simplify_title(video_title)
+
+ mobj = re.search(r'
<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>
', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video uploader')
+ return
+ video_uploader = mobj.group(1).decode('utf-8')
+
+ # Extract video thumbnail
+ mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ return
+ video_thumbnail = mobj.group(1).decode('utf-8')
+
+ # Extract video description
+ mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video description')
+ return
+ video_description = mobj.group(1).decode('utf-8')
+ if not video_description:
+ video_description = 'No description available.'
+
+ # Extract video height and width
+ mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video height')
+ return
+ yv_video_height = mobj.group(1)
+
+ mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video width')
+ return
+ yv_video_width = mobj.group(1)
+
+ # Retrieve video playlist to extract media URL
+ # I'm not completely sure what all these options are, but we
+ # seem to need most of them, otherwise the server sends a 401.
+ yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
+ yv_bitrate = '700' # according to Wikipedia this is hard-coded
+ request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
+ '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+ '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+ try:
+ self.report_download_webpage(video_id)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ # Extract media URL from playlist XML
+ mobj = re.search(r'<meta name="description" content="(.*?)" />', webpage, re.MULTILINE)
+ if mobj is not None:
+ video_description = mobj.group(1)
+ else:
+ html_parser = lxml.etree.HTMLParser()
+ vwebpage_doc = lxml.etree.parse(StringIO.StringIO(webpage), html_parser)
+ video_description = u''.join(vwebpage_doc.xpath('id("description")//text()')).strip()
+ # TODO use another parser
+
+ # Extract upload date
+ video_upload_date = u'NA'
+ mobj = re.search(r'', webpage)
+ if mobj is not None:
+ video_upload_date = mobj.group(1)
+
+ # Vimeo specific: extract request signature and timestamp
+ sig = config['request']['signature']
+ timestamp = config['request']['timestamp']
+
+ # Vimeo specific: extract video codec and quality information
+ # TODO bind to format param
+ codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+ for codec in codecs:
+ if codec[0] in config["video"]["files"]:
+ video_codec = codec[0]
+ video_extension = codec[1]
+ if 'hd' in config["video"]["files"][codec[0]]: quality = 'hd'
+ else: quality = 'sd'
+ break
+ else:
+ self._downloader.trouble(u'ERROR: no known codec found')
+ return
+
+ video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+ %(video_id, sig, timestamp, quality, video_codec.upper())
+
+ try:
+ # Process video information
+ self._downloader.process_info({
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': video_uploader,
+ 'upload_date': video_upload_date,
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': video_extension,
+ 'thumbnail': video_thumbnail,
+ 'description':
video_description, + 'player_url': None, + }) + except UnavailableVideoError: + self._downloader.trouble(u'ERROR: unable to download video') + + +class GenericIE(InfoExtractor): + """Generic last-resort information extractor.""" + + _VALID_URL = r'.*' + IE_NAME = u'generic' + + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) + + def report_download_webpage(self, video_id): + """Report webpage download.""" + self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.') + self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id) + + def report_following_redirect(self, new_url): + """Report information extraction.""" + self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url) + + def _test_redirect(self, url): + """Check if it is a redirect, like url shorteners, in case restart chain.""" + class HeadRequest(urllib2.Request): + def get_method(self): + return "HEAD" + + class HEADRedirectHandler(urllib2.HTTPRedirectHandler): + """ + Subclass the HTTPRedirectHandler to make it use our + HeadRequest also on the redirected URL + """ + def redirect_request(self, req, fp, code, msg, headers, newurl): + if code in (301, 302, 303, 307): + newurl = newurl.replace(' ', '%20') + newheaders = dict((k,v) for k,v in req.headers.items() + if k.lower() not in ("content-length", "content-type")) + return HeadRequest(newurl, + headers=newheaders, + origin_req_host=req.get_origin_req_host(), + unverifiable=True) + else: + raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp) + + class HTTPMethodFallback(urllib2.BaseHandler): + """ + Fallback to GET if HEAD is not allowed (405 HTTP error) + """ + def http_error_405(self, req, fp, code, msg, headers): + fp.read() + fp.close() + + newheaders = dict((k,v) for k,v in req.headers.items() + if k.lower() not in ("content-length", "content-type")) + return self.parent.open(urllib2.Request(req.get_full_url(), + headers=newheaders, + origin_req_host=req.get_origin_req_host(), + unverifiable=True)) + + # Build our opener + opener = urllib2.OpenerDirector() + for handler in [urllib2.HTTPHandler, urllib2.HTTPDefaultErrorHandler, + HTTPMethodFallback, HEADRedirectHandler, + urllib2.HTTPErrorProcessor, urllib2.HTTPSHandler]: + opener.add_handler(handler()) + + response = opener.open(HeadRequest(url)) + new_url = response.geturl() + + if url == new_url: return False + + self.report_following_redirect(new_url) + self._downloader.download([new_url]) + return True + + def _real_extract(self, url): + if self._test_redirect(url): return + + # At this point we have a new video + self._downloader.increment_downloads() + + video_id = url.split('/')[-1] + request = urllib2.Request(url) + try: + self.report_download_webpage(video_id) + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) + return + except ValueError, err: + # since this is the last-resort InfoExtractor, if + # this error is thrown, it'll be thrown here + self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + return + + self.report_extraction(video_id) + # Start with something easy: JW Player in SWFObject + mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage) + if mobj is None: + # 
Broaden the search a little bit + mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + return + + # It's possible that one of the regexes + # matched, but returned an empty group: + if mobj.group(1) is None: + self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + return + + video_url = urllib.unquote(mobj.group(1)) + video_id = os.path.basename(video_url) + + # here's a fun little line of code for you: + video_extension = os.path.splitext(video_id)[1][1:] + video_id = os.path.splitext(video_id)[0] + + # it's tempting to parse this further, but you would + # have to take into account all the variations like + # Video Title - Site Name + # Site Name | Video Title + # Video Title - Tagline | Site Name + # and so on and so forth; it's just not practical + mobj = re.search(r'(.*)', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract title') + return + video_title = mobj.group(1).decode('utf-8') + video_title = sanitize_title(video_title) + simple_title = simplify_title(video_title) + + # video uploader is domain name + mobj = re.match(r'(?:https?://)?([^/]*)/.*', url) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract title') + return + video_uploader = mobj.group(1).decode('utf-8') + + try: + # Process video information + self._downloader.process_info({ + 'id': video_id.decode('utf-8'), + 'url': video_url.decode('utf-8'), + 'uploader': video_uploader, + 'upload_date': u'NA', + 'title': video_title, + 'stitle': simple_title, + 'ext': video_extension.decode('utf-8'), + 'format': u'NA', + 'player_url': None, + }) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download video') + + +class YoutubeSearchIE(InfoExtractor): + """Information Extractor for YouTube search queries.""" + _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+' + _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc' + _youtube_ie = None + _max_youtube_results = 1000 + IE_NAME = u'youtube:search' + + def __init__(self, youtube_ie, downloader=None): + InfoExtractor.__init__(self, downloader) + self._youtube_ie = youtube_ie + + def report_download_page(self, query, pagenum): + """Report attempt to download playlist page with given number.""" + query = query.decode(preferredencoding()) + self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum)) + + def _real_initialize(self): + self._youtube_ie.initialize() + + def _real_extract(self, query): + mobj = re.match(self._VALID_URL, query) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) + return + + prefix, query = query.split(':') + prefix = prefix[8:] + query = query.encode('utf-8') + if prefix == '': + self._download_n_results(query, 1) + return + elif prefix == 'all': + self._download_n_results(query, self._max_youtube_results) + return + else: + try: + n = long(prefix) + if n <= 0: + self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) + return + elif n > self._max_youtube_results: + self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n)) + n = self._max_youtube_results + self._download_n_results(query, n) + return + except ValueError: # parsing prefix as integer fails + self._download_n_results(query, 1) + return + + def _download_n_results(self, query, n): + 
"""Downloads a specified number of results for a query""" + + video_ids = [] + pagenum = 0 + limit = n + + while (50 * pagenum) < limit: + self.report_download_page(query, pagenum+1) + result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1) + request = urllib2.Request(result_url) + try: + data = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err)) + return + api_response = json.loads(data)['data'] + + new_ids = list(video['id'] for video in api_response['items']) + video_ids += new_ids + + limit = min(n, api_response['totalItems']) + pagenum += 1 + + if len(video_ids) > n: + video_ids = video_ids[:n] + for id in video_ids: + self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id) + return + + +class GoogleSearchIE(InfoExtractor): + """Information Extractor for Google Video search queries.""" + _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+' + _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en' + _VIDEO_INDICATOR = r' self._max_google_results: + self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n)) + n = self._max_google_results + self._download_n_results(query, n) + return + except ValueError: # parsing prefix as integer fails + self._download_n_results(query, 1) + return + + def _download_n_results(self, query, n): + """Downloads a specified number of results for a query""" + + video_ids = [] + pagenum = 0 + + while True: + self.report_download_page(query, pagenum) + result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum*10) + request = urllib2.Request(result_url) + try: + page = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) + return + + # Extract video identifiers + for mobj in re.finditer(self._VIDEO_INDICATOR, page): + video_id = mobj.group(1) + if video_id not in video_ids: + video_ids.append(video_id) + if len(video_ids) == n: + # Specified n videos reached + for id in video_ids: + self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id) + return + + if re.search(self._MORE_PAGES_INDICATOR, page) is None: + for id in video_ids: + self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id) + return + + pagenum = pagenum + 1 + + +class YahooSearchIE(InfoExtractor): + """Information Extractor for Yahoo! 
Video search queries.""" + _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+' + _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s' + _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"' + _MORE_PAGES_INDICATOR = r'\s*Next' + _yahoo_ie = None + _max_yahoo_results = 1000 + IE_NAME = u'video.yahoo:search' + + def __init__(self, yahoo_ie, downloader=None): + InfoExtractor.__init__(self, downloader) + self._yahoo_ie = yahoo_ie + + def report_download_page(self, query, pagenum): + """Report attempt to download playlist page with given number.""" + query = query.decode(preferredencoding()) + self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum)) + + def _real_initialize(self): + self._yahoo_ie.initialize() + + def _real_extract(self, query): + mobj = re.match(self._VALID_URL, query) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) + return + + prefix, query = query.split(':') + prefix = prefix[8:] + query = query.encode('utf-8') + if prefix == '': + self._download_n_results(query, 1) + return + elif prefix == 'all': + self._download_n_results(query, self._max_yahoo_results) + return + else: + try: + n = long(prefix) + if n <= 0: + self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) + return + elif n > self._max_yahoo_results: + self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n)) + n = self._max_yahoo_results + self._download_n_results(query, n) + return + except ValueError: # parsing prefix as integer fails + self._download_n_results(query, 1) + return + + def _download_n_results(self, query, n): + """Downloads a specified number of results for a query""" + + video_ids = [] + already_seen = set() + pagenum = 1 + + while True: + self.report_download_page(query, pagenum) + result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum) + request = urllib2.Request(result_url) + try: + page = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) + return + + # Extract video identifiers + for mobj in re.finditer(self._VIDEO_INDICATOR, page): + video_id = mobj.group(1) + if video_id not in already_seen: + video_ids.append(video_id) + already_seen.add(video_id) + if len(video_ids) == n: + # Specified n videos reached + for id in video_ids: + self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id) + return + + if re.search(self._MORE_PAGES_INDICATOR, page) is None: + for id in video_ids: + self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id) + return + + pagenum = pagenum + 1 + + +class YoutubePlaylistIE(InfoExtractor): + """Information Extractor for YouTube playlists.""" + + _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*' + _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en' + _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&' + _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*' + _youtube_ie = None + IE_NAME = u'youtube:playlist' + + def __init__(self, youtube_ie, downloader=None): + InfoExtractor.__init__(self, downloader) + self._youtube_ie = youtube_ie + + def report_download_page(self, playlist_id, pagenum): + """Report attempt to download playlist page with 
given number.""" + self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum)) + + def _real_initialize(self): + self._youtube_ie.initialize() + + def _real_extract(self, url): + # Extract playlist id + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid url: %s' % url) + return + + # Single video case + if mobj.group(3) is not None: + self._youtube_ie.extract(mobj.group(3)) + return + + # Download playlist pages + # prefix is 'p' as default for playlists but there are other types that need extra care + playlist_prefix = mobj.group(1) + if playlist_prefix == 'a': + playlist_access = 'artist' + else: + playlist_prefix = 'p' + playlist_access = 'view_play_list' + playlist_id = mobj.group(2) + video_ids = [] + pagenum = 1 + + while True: + self.report_download_page(playlist_id, pagenum) + url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum) + request = urllib2.Request(url) + try: + page = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) + return + + # Extract video identifiers + ids_in_page = [] + for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page): + if mobj.group(1) not in ids_in_page: + ids_in_page.append(mobj.group(1)) + video_ids.extend(ids_in_page) + + if re.search(self._MORE_PAGES_INDICATOR, page) is None: + break + pagenum = pagenum + 1 + + playliststart = self._downloader.params.get('playliststart', 1) - 1 + playlistend = self._downloader.params.get('playlistend', -1) + if playlistend == -1: + video_ids = video_ids[playliststart:] + else: + video_ids = video_ids[playliststart:playlistend] + + for id in video_ids: + self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id) + return + + +class YoutubeUserIE(InfoExtractor): + """Information Extractor for YouTube users.""" + + _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)' + _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s' + _GDATA_PAGE_SIZE = 50 + _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d' + _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]' + _youtube_ie = None + IE_NAME = u'youtube:user' + + def __init__(self, youtube_ie, downloader=None): + InfoExtractor.__init__(self, downloader) + self._youtube_ie = youtube_ie + + def report_download_page(self, username, start_index): + """Report attempt to download user page.""" + self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' % + (username, start_index, start_index + self._GDATA_PAGE_SIZE)) + + def _real_initialize(self): + self._youtube_ie.initialize() + + def _real_extract(self, url): + # Extract username + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid url: %s' % url) + return + + username = mobj.group(1) + + # Download video ids using YouTube Data API. Result size per + # query is limited (currently to 50 videos) so we need to query + # page by page until there are no video ids - it means we got + # all of them. 
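+ # Concretely, with _GDATA_PAGE_SIZE = 50 the loop below asks for
+ # start-index 1, 51, 101, ... (GData indices are 1-based) and stops
+ # after the first page that comes back with fewer than 50 ids:
+ #   start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+ #   ...
+ #   if len(ids_in_page) < self._GDATA_PAGE_SIZE: break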
+ + video_ids = [] + pagenum = 0 + + while True: + start_index = pagenum * self._GDATA_PAGE_SIZE + 1 + self.report_download_page(username, start_index) + + request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)) + + try: + page = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) + return + + # Extract video identifiers + ids_in_page = [] + + for mobj in re.finditer(self._VIDEO_INDICATOR, page): + if mobj.group(1) not in ids_in_page: + ids_in_page.append(mobj.group(1)) + + video_ids.extend(ids_in_page) + + # A little optimization - if current page is not + # "full", ie. does not contain PAGE_SIZE video ids then + # we can assume that this page is the last one - there + # are no more ids on further pages - no need to query + # again. + + if len(ids_in_page) < self._GDATA_PAGE_SIZE: + break + + pagenum += 1 + + all_ids_count = len(video_ids) + playliststart = self._downloader.params.get('playliststart', 1) - 1 + playlistend = self._downloader.params.get('playlistend', -1) + + if playlistend == -1: + video_ids = video_ids[playliststart:] + else: + video_ids = video_ids[playliststart:playlistend] + + self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" % + (username, all_ids_count, len(video_ids))) + + for video_id in video_ids: + self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id) + + +class DepositFilesIE(InfoExtractor): + """Information extractor for depositfiles.com""" + + _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)' + IE_NAME = u'DepositFiles' + + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) + + def report_download_webpage(self, file_id): + """Report webpage download.""" + self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id) + + def report_extraction(self, file_id): + """Report information extraction.""" + self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id) + + def _real_extract(self, url): + # At this point we have a new file + self._downloader.increment_downloads() + + file_id = url.split('/')[-1] + # Rebuild url in english locale + url = 'http://depositfiles.com/en/files/' + file_id + + # Retrieve file webpage with 'Free download' button pressed + free_download_indication = { 'gateway_result' : '1' } + request = urllib2.Request(url, urllib.urlencode(free_download_indication)) + try: + self.report_download_webpage(file_id) + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err)) + return + + # Search for the real file URL + mobj = re.search(r'
<form action="(http://fileshare.+?)"', webpage)
+ if (mobj is None) or (mobj.group(1) is None):
+ # Try to figure out reason of the error.
+ mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
+ if (mobj is not None) and (mobj.group(1) is not None):
+ restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
+ self._downloader.trouble(u'ERROR: %s' % restriction_message)
+ else:
+ self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+ return
+
+ file_url = mobj.group(1)
+ file_extension = os.path.splitext(file_url)[1][1:]
+
+ # Search for file title
+ mobj = re.search(r'<b title="(.*?)">', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ file_title = mobj.group(1).decode('utf-8')
+
+ try:
+ # Process file information
+ self._downloader.process_info({
+ 'id': file_id.decode('utf-8'),
+ 'url': file_url.decode('utf-8'),
+ 'uploader': u'NA',
+ 'upload_date': u'NA',
+ 'title': file_title,
+ 'stitle': file_title,
+ 'ext': file_extension.decode('utf-8'),
+ 'format': u'NA',
+ 'player_url': None,
+ })
+ except UnavailableVideoError, err:
+ self._downloader.trouble(u'ERROR: unable to download file')
+
+
+class FacebookIE(InfoExtractor):
+ """Information Extractor for Facebook"""
+
+ _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+ _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+ _NETRC_MACHINE = 'facebook'
+ _available_formats = ['video', 'highqual', 'lowqual']
+ _video_extensions = {
+ 'video': 'mp4',
+ 'highqual': 'mp4',
+ 'lowqual': 'mp4',
+ }
+ IE_NAME = u'facebook'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def _reporter(self, message):
+ """Add header and report message."""
+ self._downloader.to_screen(u'[facebook] %s' % message)
+
+ def report_login(self):
+ """Report attempt to log in."""
+ self._reporter(u'Logging in')
+
+ def report_video_webpage_download(self, video_id):
+ """Report attempt to download video webpage."""
+ self._reporter(u'%s: Downloading video webpage' % video_id)
+
+ def report_information_extraction(self, video_id):
+ """Report attempt to extract video information."""
+ self._reporter(u'%s: Extracting video information' % video_id)
+
+ def _parse_page(self, video_webpage):
+ """Extract video information from page"""
+ # General data
+ data = {'title': r'\("video_title", "(.*?)"\)',
+ 'description': r'
<div class="datawrap">(.*?)</div>
',
+ 'owner': r'\("video_owner_name", "(.*?)"\)',
+ 'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+ }
+ video_info = {}
+ for piece in data.keys():
+ mobj = re.search(data[piece], video_webpage)
+ if mobj is not None:
+ video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+ # Video urls
+ video_urls = {}
+ for fmt in self._available_formats:
+ mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+ if mobj is not None:
+ # URL is in a Javascript segment inside an escaped Unicode format within
+ # the generally utf-8 page
+ video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+ video_info['video_urls'] = video_urls
+
+ return video_info
+
+ def _real_initialize(self):
+ if self._downloader is None:
+ return
+
+ useremail = None
+ password = None
+ downloader_params = self._downloader.params
+
+ # Attempt to use provided username and password or .netrc data
+ if downloader_params.get('username', None) is not None:
+ useremail = downloader_params['username']
+ password = downloader_params['password']
+ elif downloader_params.get('usenetrc', False):
+ try:
+ info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+ if info is not None:
+ useremail = info[0]
+ password = info[2]
+ else:
+ raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+ except (IOError, netrc.NetrcParseError), err:
+ self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+ return
+
+ if useremail is None:
+ return
+
+ # Log in
+ login_form = {
+ 'email': useremail,
+ 'pass': password,
+ 'login': 'Log+In'
+ }
+ request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
+ try:
+ self.report_login()
+ login_results = urllib2.urlopen(request).read()
+ if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+ self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). 
Check credentials or wait.') + return + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err)) + return + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + video_id = mobj.group('ID') + + # Get video webpage + self.report_video_webpage_download(video_id) + request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id) + try: + page = urllib2.urlopen(request) + video_webpage = page.read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) + return + + # Start extracting information + self.report_information_extraction(video_id) + + # Extract information + video_info = self._parse_page(video_webpage) + + # uploader + if 'owner' not in video_info: + self._downloader.trouble(u'ERROR: unable to extract uploader nickname') + return + video_uploader = video_info['owner'] + + # title + if 'title' not in video_info: + self._downloader.trouble(u'ERROR: unable to extract video title') + return + video_title = video_info['title'] + video_title = video_title.decode('utf-8') + video_title = sanitize_title(video_title) + + simple_title = simplify_title(video_title) + + # thumbnail image + if 'thumbnail' not in video_info: + self._downloader.trouble(u'WARNING: unable to extract video thumbnail') + video_thumbnail = '' + else: + video_thumbnail = video_info['thumbnail'] + + # upload date + upload_date = u'NA' + if 'upload_date' in video_info: + upload_time = video_info['upload_date'] + timetuple = email.utils.parsedate_tz(upload_time) + if timetuple is not None: + try: + upload_date = time.strftime('%Y%m%d', timetuple[0:9]) + except: + pass + + # description + video_description = video_info.get('description', 'No description available.') + + url_map = video_info['video_urls'] + if len(url_map.keys()) > 0: + # Decide which formats to download + req_format = self._downloader.params.get('format', None) + format_limit = self._downloader.params.get('format_limit', None) + + if format_limit is not None and format_limit in self._available_formats: + format_list = self._available_formats[self._available_formats.index(format_limit):] + else: + format_list = self._available_formats + existing_formats = [x for x in format_list if x in url_map] + if len(existing_formats) == 0: + self._downloader.trouble(u'ERROR: no known formats available for video') + return + if req_format is None: + video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality + elif req_format == 'worst': + video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality + elif req_format == '-1': + video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats + else: + # Specific format + if req_format not in url_map: + self._downloader.trouble(u'ERROR: requested format not available') + return + video_url_list = [(req_format, url_map[req_format])] # Specific format + + for format_param, video_real_url in video_url_list: + + # At this point we have a new video + self._downloader.increment_downloads() + + # Extension + video_extension = self._video_extensions.get(format_param, 'mp4') + + try: + # Process video information + self._downloader.process_info({ + 'id': video_id.decode('utf-8'), + 'url': 
video_real_url.decode('utf-8'), + 'uploader': video_uploader.decode('utf-8'), + 'upload_date': upload_date, + 'title': video_title, + 'stitle': simple_title, + 'ext': video_extension.decode('utf-8'), + 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), + 'thumbnail': video_thumbnail.decode('utf-8'), + 'description': video_description.decode('utf-8'), + 'player_url': None, + }) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download video') + +class BlipTVIE(InfoExtractor): + """Information extractor for blip.tv""" + + _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$' + _URL_EXT = r'^.*\.([a-z0-9]+)$' + IE_NAME = u'blip.tv' + + def report_extraction(self, file_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) + + def report_direct_download(self, title): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title)) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + + if '?' in url: + cchar = '&' + else: + cchar = '?' + json_url = url + cchar + 'skin=json&version=2&no_wrap=1' + request = urllib2.Request(json_url) + self.report_extraction(mobj.group(1)) + info = None + try: + urlh = urllib2.urlopen(request) + if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download + basename = url.split('/')[-1] + title,ext = os.path.splitext(basename) + title = title.decode('UTF-8') + ext = ext.replace('.', '') + self.report_direct_download(title) + info = { + 'id': title, + 'url': url, + 'title': title, + 'stitle': simplify_title(title), + 'ext': ext, + 'urlhandle': urlh + } + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err)) + return + if info is None: # Regular URL + try: + json_code = urlh.read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err)) + return + + try: + json_data = json.loads(json_code) + if 'Post' in json_data: + data = json_data['Post'] + else: + data = json_data + + upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') + video_url = data['media']['url'] + umobj = re.match(self._URL_EXT, video_url) + if umobj is None: + raise ValueError('Can not determine filename extension') + ext = umobj.group(1) + + info = { + 'id': data['item_id'], + 'url': video_url, + 'uploader': data['display_name'], + 'upload_date': upload_date, + 'title': data['title'], + 'stitle': simplify_title(data['title']), + 'ext': ext, + 'format': data['media']['mimeType'], + 'thumbnail': data['thumbnailUrl'], + 'description': data['description'], + 'player_url': data['embedUrl'] + } + except (ValueError,KeyError), err: + self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) + return + + self._downloader.increment_downloads() + + try: + self._downloader.process_info(info) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download video') + + +class MyVideoIE(InfoExtractor): + """Information Extractor for myvideo.de.""" + + _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*' + IE_NAME = u'myvideo' + + def __init__(self, 
downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, video_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
+
+ def report_extraction(self, video_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
+
+ def _real_extract(self,url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group(1)
+
+ # Get video webpage
+ request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
+ try:
+ self.report_download_webpage(video_id)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ self.report_extraction(video_id)
+ mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+ webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract media URL')
+ return
+ video_url = mobj.group(1) + ('/%s.flv' % video_id)
+
+ mobj = re.search('<title>([^<]+)</title>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+
+ video_title = mobj.group(1)
+ video_title = sanitize_title(video_title)
+
+ simple_title = simplify_title(video_title)
+
+ try:
+ self._downloader.process_info({
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': u'NA',
+ 'upload_date': u'NA',
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': u'flv',
+ 'format': u'NA',
+ 'player_url': None,
+ })
+ except UnavailableVideoError:
+ self._downloader.trouble(u'\nERROR: Unable to download video')
+
+class ComedyCentralIE(InfoExtractor):
+ """Information extractor for The Daily Show and Colbert Report """
+
+ _VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
+ IE_NAME = u'comedycentral'
+
+ def report_extraction(self, episode_id):
+ self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
+
+ def report_config_download(self, episode_id):
+ self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+
+ def report_index_download(self, episode_id):
+ self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
+
+ def report_player_url(self, episode_id):
+ self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ if mobj.group('shortname'):
+ if mobj.group('shortname') in ('tds', 'thedailyshow'):
+ url = u'http://www.thedailyshow.com/full-episodes/'
+ else:
+ url = u'http://www.colbertnation.com/full-episodes/'
+ mobj = re.match(self._VALID_URL, url)
+ assert mobj is not None
+
+ dlNewest = not mobj.group('episode')
+ if dlNewest:
+ epTitle = mobj.group('showname')
+ else:
+ epTitle = mobj.group('episode')
+
+ req = urllib2.Request(url)
+ self.report_extraction(epTitle)
+ try:
+ htmlHandle = urllib2.urlopen(req)
+ html = htmlHandle.read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+ return
+ if dlNewest:
+ url = htmlHandle.geturl()
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+
self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url) + return + if mobj.group('episode') == '': + self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url) + return + epTitle = mobj.group('episode') + + mMovieParams = re.findall('(?:[^/]+)/(?P[^/?]+)[/?]?.*$' + IE_NAME = u'escapist' + + def report_extraction(self, showName): + self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName) + + def report_config_download(self, showName): + self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName) + + def _real_extract(self, url): + htmlParser = HTMLParser.HTMLParser() + + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + showName = mobj.group('showname') + videoId = mobj.group('episode') + + self.report_extraction(showName) + try: + webPage = urllib2.urlopen(url).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err)) + return + + descMatch = re.search('[0-9]+)/(?P.*)$' + IE_NAME = u'collegehumor' + + def report_webpage(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) + + def _real_extract(self, url): + htmlParser = HTMLParser.HTMLParser() + + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + video_id = mobj.group('videoid') + + self.report_webpage(video_id) + request = urllib2.Request(url) + try: + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) + return + + m = re.search(r'id="video:(?P[0-9]+)"', webpage) + if m is None: + self._downloader.trouble(u'ERROR: Cannot extract internal video ID') + return + internal_video_id = m.group('internalvideoid') + + info = { + 'id': video_id, + 'internal_id': internal_video_id, + } + + self.report_extraction(video_id) + xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id + try: + metaXml = urllib2.urlopen(xmlUrl).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err)) + return + + mdoc = xml.etree.ElementTree.fromstring(metaXml) + try: + videoNode = mdoc.findall('./video')[0] + info['description'] = videoNode.findall('./description')[0].text + info['title'] = videoNode.findall('./caption')[0].text + info['stitle'] = simplify_title(info['title']) + info['url'] = videoNode.findall('./file')[0].text + info['thumbnail'] = videoNode.findall('./thumbnail')[0].text + info['ext'] = info['url'].rpartition('.')[2] + info['format'] = info['ext'] + except IndexError: + self._downloader.trouble(u'\nERROR: Invalid metadata XML file') + return + + self._downloader.increment_downloads() + + try: + self._downloader.process_info(info) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download video') + + +class XVideosIE(InfoExtractor): + """Information extractor for xvideos.com""" + + _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)' + 
IE_NAME = u'xvideos' + + def report_webpage(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) + + def _real_extract(self, url): + htmlParser = HTMLParser.HTMLParser() + + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + video_id = mobj.group(1).decode('utf-8') + + self.report_webpage(video_id) + + request = urllib2.Request(r'http://www.xvideos.com/video' + video_id) + try: + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) + return + + self.report_extraction(video_id) + + + # Extract video URL + mobj = re.search(r'flv_url=(.+?)&', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract video url') + return + video_url = urllib2.unquote(mobj.group(1).decode('utf-8')) + + + # Extract title + mobj = re.search(r'(.*?)\s+-\s+XVID', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract video title') + return + video_title = mobj.group(1).decode('utf-8') + + + # Extract video thumbnail + mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract video thumbnail') + return + video_thumbnail = mobj.group(1).decode('utf-8') + + + + self._downloader.increment_downloads() + info = { + 'id': video_id, + 'url': video_url, + 'uploader': None, + 'upload_date': None, + 'title': video_title, + 'stitle': simplify_title(video_title), + 'ext': 'flv', + 'format': 'flv', + 'thumbnail': video_thumbnail, + 'description': None, + 'player_url': None, + } + + try: + self._downloader.process_info(info) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download ' + video_id) + + +class SoundcloudIE(InfoExtractor): + """Information extractor for soundcloud.com + To access the media, the uid of the song and a stream token + must be extracted from the page source and the script must make + a request to media.soundcloud.com/crossdomain.xml. 
Then + the media can be grabbed by requesting from an url composed + of the stream token and uid + """ + + _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)' + IE_NAME = u'soundcloud' + + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) + + def report_webpage(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) + + def _real_extract(self, url): + htmlParser = HTMLParser.HTMLParser() + + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + + # extract uploader (which is in the url) + uploader = mobj.group(1).decode('utf-8') + # extract simple title (uploader + slug of song title) + slug_title = mobj.group(2).decode('utf-8') + simple_title = uploader + '-' + slug_title + + self.report_webpage('%s/%s' % (uploader, slug_title)) + + request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title)) + try: + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) + return + + self.report_extraction('%s/%s' % (uploader, slug_title)) + + # extract uid and stream token that soundcloud hands out for access + mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage) + if mobj: + video_id = mobj.group(1) + stream_token = mobj.group(2) + + # extract unsimplified title + mobj = re.search('"title":"(.*?)",', webpage) + if mobj: + title = mobj.group(1) + + # construct media url (with uid/token) + mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s" + mediaURL = mediaURL % (video_id, stream_token) + + # description + description = u'No description available' + mobj = re.search('track-description-value"><p>(.*?)</p>', webpage) + if mobj: + description = mobj.group(1) + + # upload date + upload_date = None + mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage) + if mobj: + try: + upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d') + except Exception, e: + print str(e) + + # for soundcloud, a request to a cross domain is required for cookies + request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers) + + try: + self._downloader.process_info({ + 'id': video_id.decode('utf-8'), + 'url': mediaURL, + 'uploader': uploader.decode('utf-8'), + 'upload_date': upload_date, + 'title': simple_title.decode('utf-8'), + 'stitle': simple_title.decode('utf-8'), + 'ext': u'mp3', + 'format': u'NA', + 'player_url': None, + 'description': description.decode('utf-8') + }) + except UnavailableVideoError: + self._downloader.trouble(u'\nERROR: unable to download video') + + +class InfoQIE(InfoExtractor): + """Information extractor for infoq.com""" + + _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$' + IE_NAME = u'infoq' + + def report_webpage(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % 
(self.IE_NAME, video_id)) + + def _real_extract(self, url): + htmlParser = HTMLParser.HTMLParser() + + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + + self.report_webpage(url) + + request = urllib2.Request(url) + try: + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) + return + + self.report_extraction(url) + + + # Extract video URL + mobj = re.search(r"jsclassref='([^']*)'", webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract video url') + return + video_url = 'rtmpe://video.infoq.com/cfx/st/' + urllib2.unquote(mobj.group(1).decode('base64')) + + + # Extract title + mobj = re.search(r'contentTitle = "(.*?)";', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract video title') + return + video_title = mobj.group(1).decode('utf-8') + + # Extract description + video_description = u'No description available.' + mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage) + if mobj is not None: + video_description = mobj.group(1).decode('utf-8') + + video_filename = video_url.split('/')[-1] + video_id, extension = video_filename.split('.') + + self._downloader.increment_downloads() + info = { + 'id': video_id, + 'url': video_url, + 'uploader': None, + 'upload_date': None, + 'title': video_title, + 'stitle': simplify_title(video_title), + 'ext': extension, + 'format': extension, # Extension is always(?) mp4, but seems to be flv + 'thumbnail': None, + 'description': video_description, + 'player_url': None, + } + + try: + self._downloader.process_info(info) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download ' + video_url) + +class MixcloudIE(InfoExtractor): + """Information extractor for www.mixcloud.com""" + _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)' + IE_NAME = u'mixcloud' + + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) + + def report_download_json(self, file_id): + """Report JSON download.""" + self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME) + + def report_extraction(self, file_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) + + def get_urls(self, jsonData, fmt, bitrate='best'): + """Get urls from 'audio_formats' section in json""" + file_url = None + try: + bitrate_list = jsonData[fmt] + if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list: + bitrate = max(bitrate_list) # select highest + + url_list = jsonData[fmt][bitrate] + except TypeError: # we have no bitrate info. 
+        url_list = jsonData[fmt]
+
+        return url_list
+
+    def check_urls(self, url_list):
+        """Return the first URL in the list that actually answers."""
+        for url in url_list:
+            try:
+                urllib2.urlopen(url)
+                return url
+            except (urllib2.URLError, httplib.HTTPException, socket.error):
+                pass # dead link, try the next one
+
+        return None
+
+    def _print_formats(self, formats):
+        print 'Available formats:'
+        for fmt in formats.keys():
+            for b in formats[fmt]:
+                try:
+                    ext = formats[fmt][b][0]
+                    print '%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])
+                except TypeError: # we have no bitrate info
+                    ext = formats[fmt][0]
+                    print '%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])
+                    break
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        # extract uploader & filename from url
+        uploader = mobj.group(1).decode('utf-8')
+        file_id = uploader + "-" + mobj.group(2).decode('utf-8')
+
+        # construct API request
+        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
+        # retrieve .json file with links to files
+        request = urllib2.Request(file_url)
+        try:
+            self.report_download_json(file_url)
+            jsonData = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % str(err))
+            return
+
+        # parse JSON
+        json_data = json.loads(jsonData)
+        player_url = json_data['player_swf_url']
+        formats = dict(json_data['audio_formats'])
+
+        req_format = self._downloader.params.get('format', None)
+        bitrate = None
+
+        if self._downloader.params.get('listformats', None):
+            self._print_formats(formats)
+            return
+
+        if req_format is None or req_format == 'best':
+            for format_param in formats.keys():
+                url_list = self.get_urls(formats, format_param)
+                # check urls
+                file_url = self.check_urls(url_list)
+                if file_url is not None:
+                    break # got it!
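# Editor's sketch, not part of the patch: check_urls() above finds a playable
# link by simply GETting each candidate and keeping the first one that does
# not raise. Note that the "else:" on the next diff line pairs with the
# "if req_format is None or req_format == 'best'" test above, not with the
# for loop. The probing idea, stand-alone:
import urllib2, httplib, socket

def first_live_url(candidates):
    for url in candidates:
        try:
            urllib2.urlopen(url)  # any non-error response will do
            return url
        except (urllib2.URLError, httplib.HTTPException, socket.error):
            pass  # dead link, try the next one
    return None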
+ else: + if req_format not in formats.keys(): + self._downloader.trouble(u'ERROR: format is not available') + return + + url_list = self.get_urls(formats, req_format) + file_url = self.check_urls(url_list) + format_param = req_format + + # We have audio + self._downloader.increment_downloads() + try: + # Process file information + self._downloader.process_info({ + 'id': file_id.decode('utf-8'), + 'url': file_url.decode('utf-8'), + 'uploader': uploader.decode('utf-8'), + 'upload_date': u'NA', + 'title': json_data['name'], + 'stitle': simplify_title(json_data['name']), + 'ext': file_url.split('.')[-1].decode('utf-8'), + 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), + 'thumbnail': json_data['thumbnail_url'], + 'description': json_data['description'], + 'player_url': player_url.decode('utf-8'), + }) + except UnavailableVideoError, err: + self._downloader.trouble(u'ERROR: unable to download file') + +class StanfordOpenClassroomIE(InfoExtractor): + """Information extractor for Stanford's Open ClassRoom""" + + _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' + IE_NAME = u'stanfordoc' + + def report_download_webpage(self, objid): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + + if mobj.group('course') and mobj.group('video'): # A specific video + course = mobj.group('course') + video = mobj.group('video') + info = { + 'id': simplify_title(course + '_' + video), + } + + self.report_extraction(info['id']) + baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' + xmlUrl = baseUrl + video + '.xml' + try: + metaXml = urllib2.urlopen(xmlUrl).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % unicode(err)) + return + mdoc = xml.etree.ElementTree.fromstring(metaXml) + try: + info['title'] = mdoc.findall('./title')[0].text + info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text + except IndexError: + self._downloader.trouble(u'\nERROR: Invalid metadata XML file') + return + info['stitle'] = simplify_title(info['title']) + info['ext'] = info['url'].rpartition('.')[2] + info['format'] = info['ext'] + self._downloader.increment_downloads() + try: + self._downloader.process_info(info) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download video') + elif mobj.group('course'): # A course page + unescapeHTML = HTMLParser.HTMLParser().unescape + + course = mobj.group('course') + info = { + 'id': simplify_title(course), + 'type': 'playlist', + } + + self.report_download_webpage(info['id']) + try: + coursepage = urllib2.urlopen(url).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err)) + return + + m = re.search('<h1>([^<]+)</h1>', coursepage) + if m: + info['title'] = unescapeHTML(m.group(1)) + else: + info['title'] = info['id'] + info['stitle'] = 
simplify_title(info['title']) + + m = re.search('<description>([^<]+)</description>', coursepage) + if m: + info['description'] = unescapeHTML(m.group(1)) + + links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage)) + info['list'] = [ + { + 'type': 'reference', + 'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage), + } + for vpage in links] + + for entry in info['list']: + assert entry['type'] == 'reference' + self.extract(entry['url']) + else: # Root page + unescapeHTML = HTMLParser.HTMLParser().unescape + + info = { + 'id': 'Stanford OpenClassroom', + 'type': 'playlist', + } + + self.report_download_webpage(info['id']) + rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' + try: + rootpage = urllib2.urlopen(rootURL).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err)) + return + + info['title'] = info['id'] + info['stitle'] = simplify_title(info['title']) + + links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage)) + info['list'] = [ + { + 'type': 'reference', + 'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage), + } + for cpage in links] + + for entry in info['list']: + assert entry['type'] == 'reference' + self.extract(entry['url']) + +class MTVIE(InfoExtractor): + """Information extractor for MTV.com""" + + _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$' + IE_NAME = u'mtv' + + def report_webpage(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + if not mobj.group('proto'): + url = 'http://' + url + video_id = mobj.group('videoid') + self.report_webpage(video_id) + + request = urllib2.Request(url) + try: + webpage = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) + return + + mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract song name') + return + song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1')) + mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract performer') + return + performer = unescapeHTML(mobj.group(1).decode('iso-8859-1')) + video_title = performer + ' - ' + song_name + + mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to mtvn_uri') + return + mtvn_uri = mobj.group(1) + + mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract content id') + return + content_id = mobj.group(1) + + videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri + 
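# Editor's sketch, not part of the patch: the hand-glued query string above
# could also be built with urllib.urlencode. It is not a drop-in replacement,
# since urlencode percent-escapes the values while the raw concatenation does
# not, and the mediaGen endpoint may care about that. Same parameters:
import urllib
query = [('uri', mtvn_uri), ('id', content_id), ('vid', video_id),
         ('ref', 'www.mtvn.com'), ('viewUri', mtvn_uri)]
videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?' + urllib.urlencode(query)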
self.report_extraction(video_id) + request = urllib2.Request(videogen_url) + try: + metadataXml = urllib2.urlopen(request).read() + except (urllib2.URLError, httplib.HTTPException, socket.error), err: + self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % str(err)) + return + + mdoc = xml.etree.ElementTree.fromstring(metadataXml) + renditions = mdoc.findall('.//rendition') + + # For now, always pick the highest quality. + rendition = renditions[-1] + + try: + _,_,ext = rendition.attrib['type'].partition('/') + format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate'] + video_url = rendition.find('./src').text + except KeyError: + self._downloader.trouble('Invalid rendition field.') + return + + self._downloader.increment_downloads() + info = { + 'id': video_id, + 'url': video_url, + 'uploader': performer, + 'title': video_title, + 'stitle': simplify_title(video_title), + 'ext': ext, + 'format': format, + } + + try: + self._downloader.process_info(info) + except UnavailableVideoError, err: + self._downloader.trouble(u'\nERROR: unable to download ' + video_id) diff --git a/youtube_dl/PostProcessing.py b/youtube_dl/PostProcessing.py new file mode 100644 index 000000000..90ac07af4 --- /dev/null +++ b/youtube_dl/PostProcessing.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import os +import subprocess +import sys +import time + +from Utils import * + + +class PostProcessor(object): + """Post Processor class. + + PostProcessor objects can be added to downloaders with their + add_post_processor() method. When the downloader has finished a + successful download, it will take its internal chain of PostProcessors + and start calling the run() method on each one of them, first with + an initial argument and then with the returned value of the previous + PostProcessor. + + The chain will be stopped if one of them ever returns None or the end + of the chain is reached. + + PostProcessor objects follow a "mutual registration" process similar + to InfoExtractor objects. + """ + + _downloader = None + + def __init__(self, downloader=None): + self._downloader = downloader + + def set_downloader(self, downloader): + """Sets the downloader for this PP.""" + self._downloader = downloader + + def run(self, information): + """Run the PostProcessor. + + The "information" argument is a dictionary like the ones + composed by InfoExtractors. The only difference is that this + one has an extra field called "filepath" that points to the + downloaded file. + + When this method returns None, the postprocessing chain is + stopped. However, this method may return an information + dictionary that will be passed to the next postprocessing + object in the chain. It can be the one it received after + changing some fields. + + In addition, this method may raise a PostProcessingError + exception that will be taken into account by the downloader + it was called from. 
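# Editor's sketch, not part of the patch: about the smallest useful
# PostProcessor under the contract described above. Returning the dict keeps
# the chain going; returning None stops it. encodeFilename() and error
# handling are omitted for brevity, and the class name is hypothetical:
class RenamePP(PostProcessor):
    def run(self, information):
        new_path = information['filepath'] + u'.done'
        os.rename(information['filepath'], new_path)
        information['filepath'] = new_path
        return information  # hand the updated info to the next PP in the chain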
+ """ + return information # by default, do nothing + +class AudioConversionError(BaseException): + def __init__(self, message): + self.message = message + +class FFmpegExtractAudioPP(PostProcessor): + + def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False): + PostProcessor.__init__(self, downloader) + if preferredcodec is None: + preferredcodec = 'best' + self._preferredcodec = preferredcodec + self._preferredquality = preferredquality + self._keepvideo = keepvideo + + @staticmethod + def get_audio_codec(path): + try: + cmd = ['ffprobe', '-show_streams', '--', encodeFilename(path)] + handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE) + output = handle.communicate()[0] + if handle.wait() != 0: + return None + except (IOError, OSError): + return None + audio_codec = None + for line in output.split('\n'): + if line.startswith('codec_name='): + audio_codec = line.split('=')[1].strip() + elif line.strip() == 'codec_type=audio' and audio_codec is not None: + return audio_codec + return None + + @staticmethod + def run_ffmpeg(path, out_path, codec, more_opts): + if codec is None: + acodec_opts = [] + else: + acodec_opts = ['-acodec', codec] + cmd = ['ffmpeg', '-y', '-i', encodeFilename(path), '-vn'] + acodec_opts + more_opts + ['--', encodeFilename(out_path)] + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout,stderr = p.communicate() + except (IOError, OSError): + e = sys.exc_info()[1] + if isinstance(e, OSError) and e.errno == 2: + raise AudioConversionError('ffmpeg not found. Please install ffmpeg.') + else: + raise e + if p.returncode != 0: + msg = stderr.strip().split('\n')[-1] + raise AudioConversionError(msg) + + def run(self, information): + path = information['filepath'] + + filecodec = self.get_audio_codec(path) + if filecodec is None: + self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe') + return None + + more_opts = [] + if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'): + if self._preferredcodec == 'm4a' and filecodec == 'aac': + # Lossless, but in another container + acodec = 'copy' + extension = self._preferredcodec + more_opts = ['-absf', 'aac_adtstoasc'] + elif filecodec in ['aac', 'mp3', 'vorbis']: + # Lossless if possible + acodec = 'copy' + extension = filecodec + if filecodec == 'aac': + more_opts = ['-f', 'adts'] + if filecodec == 'vorbis': + extension = 'ogg' + else: + # MP3 otherwise. 
+ acodec = 'libmp3lame' + extension = 'mp3' + more_opts = [] + if self._preferredquality is not None: + more_opts += ['-ab', self._preferredquality] + else: + # We convert the audio (lossy) + acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec] + extension = self._preferredcodec + more_opts = [] + if self._preferredquality is not None: + more_opts += ['-ab', self._preferredquality] + if self._preferredcodec == 'aac': + more_opts += ['-f', 'adts'] + if self._preferredcodec == 'm4a': + more_opts += ['-absf', 'aac_adtstoasc'] + if self._preferredcodec == 'vorbis': + extension = 'ogg' + if self._preferredcodec == 'wav': + extension = 'wav' + more_opts += ['-f', 'wav'] + + prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups + new_path = prefix + sep + extension + self._downloader.to_screen(u'[ffmpeg] Destination: ' + new_path) + try: + self.run_ffmpeg(path, new_path, acodec, more_opts) + except: + etype,e,tb = sys.exc_info() + if isinstance(e, AudioConversionError): + self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message) + else: + self._downloader.to_stderr(u'ERROR: error running ffmpeg') + return None + + # Try to update the date time for extracted audio file. + if information.get('filetime') is not None: + try: + os.utime(encodeFilename(new_path), (time.time(), information['filetime'])) + except: + self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file') + + if not self._keepvideo: + try: + os.remove(encodeFilename(path)) + except (IOError, OSError): + self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file') + return None + + information['filepath'] = new_path + return information diff --git a/youtube_dl/Utils.py b/youtube_dl/Utils.py new file mode 100644 index 000000000..f924b98f7 --- /dev/null +++ b/youtube_dl/Utils.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import gzip +import htmlentitydefs +import HTMLParser +import locale +import os +import re +import sys +import zlib +import urllib2 +import email.utils + +try: + import cStringIO as StringIO +except ImportError: + import StringIO + +std_headers = { + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1', + 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', + 'Accept-Encoding': 'gzip, deflate', + 'Accept-Language': 'en-us,en;q=0.5', +} + +def preferredencoding(): + """Get preferred encoding. + + Returns the best encoding scheme for the system, based on + locale.getpreferredencoding() and some further tweaks. + """ + def yield_preferredencoding(): + try: + pref = locale.getpreferredencoding() + u'TEST'.encode(pref) + except: + pref = 'UTF-8' + while True: + yield pref + return yield_preferredencoding().next() + + +def htmlentity_transform(matchobj): + """Transforms an HTML entity to a Unicode character. + + This function receives a match object and is intended to be used with + the re.sub() function. 
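# Editor's illustration, not part of the patch: htmlentity_transform is meant
# to be driven through re.sub, exactly as sanitize_title below does:
print re.sub(ur'(?u)&(.+?);', htmlentity_transform, u'Tom &amp; Jerry &#x27;09')
# -> Tom & Jerry '09  (named entity via name2codepoint, hex entity via unichr)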
+ """ + entity = matchobj.group(1) + + # Known non-numeric HTML entity + if entity in htmlentitydefs.name2codepoint: + return unichr(htmlentitydefs.name2codepoint[entity]) + + # Unicode character + mobj = re.match(ur'(?u)#(x?\d+)', entity) + if mobj is not None: + numstr = mobj.group(1) + if numstr.startswith(u'x'): + base = 16 + numstr = u'0%s' % numstr + else: + base = 10 + return unichr(long(numstr, base)) + + # Unknown entity in name, return its literal representation + return (u'&%s;' % entity) + + +def sanitize_title(utitle): + """Sanitizes a video title so it could be used as part of a filename.""" + utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle) + return utitle.replace(unicode(os.sep), u'%') + + +def sanitize_open(filename, open_mode): + """Try to open the given filename, and slightly tweak it if this fails. + + Attempts to open the given filename. If this fails, it tries to change + the filename slightly, step by step, until it's either able to open it + or it fails and raises a final exception, like the standard open() + function. + + It returns the tuple (stream, definitive_file_name). + """ + try: + if filename == u'-': + if sys.platform == 'win32': + import msvcrt + msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) + return (sys.stdout, filename) + stream = open(encodeFilename(filename), open_mode) + return (stream, filename) + except (IOError, OSError), err: + # In case of error, try to remove win32 forbidden chars + filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename) + + # An exception here should be caught in the caller + stream = open(encodeFilename(filename), open_mode) + return (stream, filename) + + +def timeconvert(timestr): + """Convert RFC 2822 defined time string into system timestamp""" + timestamp = None + timetuple = email.utils.parsedate_tz(timestr) + if timetuple is not None: + timestamp = email.utils.mktime_tz(timetuple) + return timestamp + +def simplify_title(title): + expr = re.compile(ur'[^\w\d_\-]+', flags=re.UNICODE) + return expr.sub(u'_', title).strip(u'_') + +def orderedSet(iterable): + """ Remove all duplicates from the input iterable """ + res = [] + for el in iterable: + if el not in res: + res.append(el) + return res + +def unescapeHTML(s): + """ + @param s a string (of type unicode) + """ + assert type(s) == type(u'') + + htmlParser = HTMLParser.HTMLParser() + return htmlParser.unescape(s) + +def encodeFilename(s): + """ + @param s The name of the file (of type unicode) + """ + + assert type(s) == type(u'') + + if sys.platform == 'win32' and sys.getwindowsversion().major >= 5: + # Pass u'' directly to use Unicode APIs on Windows 2000 and up + # (Detecting Windows NT 4 is tricky because 'major >= 4' would + # match Windows 9x series as well. Besides, NT 4 is obsolete.) + return s + else: + return s.encode(sys.getfilesystemencoding(), 'ignore') + +class DownloadError(Exception): + """Download Error exception. + + This exception may be thrown by FileDownloader objects if they are not + configured to continue on errors. They will contain the appropriate + error message. + """ + pass + + +class SameFileError(Exception): + """Same File exception. + + This exception will be thrown by FileDownloader objects if they detect + multiple files would have to be downloaded to the same file on disk. + """ + pass + + +class PostProcessingError(Exception): + """Post Processing exception. + + This exception may be raised by PostProcessor's .run() method to + indicate an error in the postprocessing task. 
+ """ + pass + +class MaxDownloadsReached(Exception): + """ --max-downloads limit has been reached. """ + pass + + +class UnavailableVideoError(Exception): + """Unavailable Format exception. + + This exception will be thrown when a video is requested + in a format that is not available for that video. + """ + pass + + +class ContentTooShortError(Exception): + """Content Too Short exception. + + This exception may be raised by FileDownloader objects when a file they + download is too small for what the server announced first, indicating + the connection was probably interrupted. + """ + # Both in bytes + downloaded = None + expected = None + + def __init__(self, downloaded, expected): + self.downloaded = downloaded + self.expected = expected + + +class YoutubeDLHandler(urllib2.HTTPHandler): + """Handler for HTTP requests and responses. + + This class, when installed with an OpenerDirector, automatically adds + the standard headers to every HTTP request and handles gzipped and + deflated responses from web servers. If compression is to be avoided in + a particular request, the original request in the program code only has + to include the HTTP header "Youtubedl-No-Compression", which will be + removed before making the real request. + + Part of this code was copied from: + + http://techknack.net/python-urllib2-handlers/ + + Andrew Rowls, the author of that code, agreed to release it to the + public domain. + """ + + @staticmethod + def deflate(data): + try: + return zlib.decompress(data, -zlib.MAX_WBITS) + except zlib.error: + return zlib.decompress(data) + + @staticmethod + def addinfourl_wrapper(stream, headers, url, code): + if hasattr(urllib2.addinfourl, 'getcode'): + return urllib2.addinfourl(stream, headers, url, code) + ret = urllib2.addinfourl(stream, headers, url) + ret.code = code + return ret + + def http_request(self, req): + for h in std_headers: + if h in req.headers: + del req.headers[h] + req.add_header(h, std_headers[h]) + if 'Youtubedl-no-compression' in req.headers: + if 'Accept-encoding' in req.headers: + del req.headers['Accept-encoding'] + del req.headers['Youtubedl-no-compression'] + return req + + def http_response(self, req, resp): + old_resp = resp + # gzip + if resp.headers.get('Content-encoding', '') == 'gzip': + gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r') + resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) + resp.msg = old_resp.msg + # deflate + if resp.headers.get('Content-encoding', '') == 'deflate': + gz = StringIO.StringIO(self.deflate(resp.read())) + resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) + resp.msg = old_resp.msg + return resp + +try: + import json +except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson): + import re + class json(object): + @staticmethod + def loads(s): + s = s.decode('UTF-8') + def raiseError(msg, i): + raise ValueError(msg + ' at position ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:])) + def skipSpace(i, expectMore=True): + while i < len(s) and s[i] in ' \t\r\n': + i += 1 + if expectMore: + if i >= len(s): + raiseError('Premature end', i) + return i + def decodeEscape(match): + esc = match.group(1) + _STATIC = { + '"': '"', + '\\': '\\', + '/': '/', + 'b': unichr(0x8), + 'f': unichr(0xc), + 'n': '\n', + 'r': '\r', + 't': '\t', + } + if esc in _STATIC: + return _STATIC[esc] + if esc[0] == 'u': + if len(esc) == 1+4: + return unichr(int(esc[1:5], 16)) + if len(esc) == 5+6 and esc[5:7] == '\\u': + hi = 
int(esc[1:5], 16) + low = int(esc[7:11], 16) + return unichr((hi - 0xd800) * 0x400 + low - 0xdc00 + 0x10000) + raise ValueError('Unknown escape ' + str(esc)) + def parseString(i): + i += 1 + e = i + while True: + e = s.index('"', e) + bslashes = 0 + while s[e-bslashes-1] == '\\': + bslashes += 1 + if bslashes % 2 == 1: + e += 1 + continue + break + rexp = re.compile(r'\\(u[dD][89aAbB][0-9a-fA-F]{2}\\u[0-9a-fA-F]{4}|u[0-9a-fA-F]{4}|.|$)') + stri = rexp.sub(decodeEscape, s[i:e]) + return (e+1,stri) + def parseObj(i): + i += 1 + res = {} + i = skipSpace(i) + if s[i] == '}': # Empty dictionary + return (i+1,res) + while True: + if s[i] != '"': + raiseError('Expected a string object key', i) + i,key = parseString(i) + i = skipSpace(i) + if i >= len(s) or s[i] != ':': + raiseError('Expected a colon', i) + i,val = parse(i+1) + res[key] = val + i = skipSpace(i) + if s[i] == '}': + return (i+1, res) + if s[i] != ',': + raiseError('Expected comma or closing curly brace', i) + i = skipSpace(i+1) + def parseArray(i): + res = [] + i = skipSpace(i+1) + if s[i] == ']': # Empty array + return (i+1,res) + while True: + i,val = parse(i) + res.append(val) + i = skipSpace(i) # Raise exception if premature end + if s[i] == ']': + return (i+1, res) + if s[i] != ',': + raiseError('Expected a comma or closing bracket', i) + i = skipSpace(i+1) + def parseDiscrete(i): + for k,v in {'true': True, 'false': False, 'null': None}.items(): + if s.startswith(k, i): + return (i+len(k), v) + raiseError('Not a boolean (or null)', i) + def parseNumber(i): + mobj = re.match('^(-?(0|[1-9][0-9]*)(\.[0-9]*)?([eE][+-]?[0-9]+)?)', s[i:]) + if mobj is None: + raiseError('Not a number', i) + nums = mobj.group(1) + if '.' in nums or 'e' in nums or 'E' in nums: + return (i+len(nums), float(nums)) + return (i+len(nums), int(nums)) + CHARMAP = {'{': parseObj, '[': parseArray, '"': parseString, 't': parseDiscrete, 'f': parseDiscrete, 'n': parseDiscrete} + def parse(i): + i = skipSpace(i) + i,res = CHARMAP.get(s[i], parseNumber)(i) + i = skipSpace(i, False) + return (i,res) + i,res = parse(0) + if i < len(s): + raise ValueError('Extra data at end of input (index ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]) + ')') + return res diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py old mode 100755 new mode 100644 index 8d0d1cc33..409e4386f --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -25,4294 +25,21 @@ UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl' import cookielib -import datetime import getpass -import gzip -import htmlentitydefs -import HTMLParser -import httplib -import locale -import math -import netrc import optparse import os -import os.path import re import shlex import socket -import string import subprocess import sys -import time -import urllib import urllib2 import warnings -import zlib - -if os.name == 'nt': - import ctypes - -try: - import email.utils -except ImportError: # Python 2.4 - import email.Utils -try: - import cStringIO as StringIO -except ImportError: - import StringIO - -# parse_qs was moved from the cgi module to the urlparse module recently. -try: - from urlparse import parse_qs -except ImportError: - from cgi import parse_qs - -try: - import lxml.etree -except ImportError: - pass # Handled below - -try: - import xml.etree.ElementTree -except ImportError: # Python<2.5: Not officially supported, but let it slip - warnings.warn('xml.etree.ElementTree support is missing. 
Consider upgrading to Python >= 2.5 if you get related errors.') - -std_headers = { - 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1', - 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', - 'Accept-Encoding': 'gzip, deflate', - 'Accept-Language': 'en-us,en;q=0.5', -} - -try: - import json -except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson): - import re - class json(object): - @staticmethod - def loads(s): - s = s.decode('UTF-8') - def raiseError(msg, i): - raise ValueError(msg + ' at position ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:])) - def skipSpace(i, expectMore=True): - while i < len(s) and s[i] in ' \t\r\n': - i += 1 - if expectMore: - if i >= len(s): - raiseError('Premature end', i) - return i - def decodeEscape(match): - esc = match.group(1) - _STATIC = { - '"': '"', - '\\': '\\', - '/': '/', - 'b': unichr(0x8), - 'f': unichr(0xc), - 'n': '\n', - 'r': '\r', - 't': '\t', - } - if esc in _STATIC: - return _STATIC[esc] - if esc[0] == 'u': - if len(esc) == 1+4: - return unichr(int(esc[1:5], 16)) - if len(esc) == 5+6 and esc[5:7] == '\\u': - hi = int(esc[1:5], 16) - low = int(esc[7:11], 16) - return unichr((hi - 0xd800) * 0x400 + low - 0xdc00 + 0x10000) - raise ValueError('Unknown escape ' + str(esc)) - def parseString(i): - i += 1 - e = i - while True: - e = s.index('"', e) - bslashes = 0 - while s[e-bslashes-1] == '\\': - bslashes += 1 - if bslashes % 2 == 1: - e += 1 - continue - break - rexp = re.compile(r'\\(u[dD][89aAbB][0-9a-fA-F]{2}\\u[0-9a-fA-F]{4}|u[0-9a-fA-F]{4}|.|$)') - stri = rexp.sub(decodeEscape, s[i:e]) - return (e+1,stri) - def parseObj(i): - i += 1 - res = {} - i = skipSpace(i) - if s[i] == '}': # Empty dictionary - return (i+1,res) - while True: - if s[i] != '"': - raiseError('Expected a string object key', i) - i,key = parseString(i) - i = skipSpace(i) - if i >= len(s) or s[i] != ':': - raiseError('Expected a colon', i) - i,val = parse(i+1) - res[key] = val - i = skipSpace(i) - if s[i] == '}': - return (i+1, res) - if s[i] != ',': - raiseError('Expected comma or closing curly brace', i) - i = skipSpace(i+1) - def parseArray(i): - res = [] - i = skipSpace(i+1) - if s[i] == ']': # Empty array - return (i+1,res) - while True: - i,val = parse(i) - res.append(val) - i = skipSpace(i) # Raise exception if premature end - if s[i] == ']': - return (i+1, res) - if s[i] != ',': - raiseError('Expected a comma or closing bracket', i) - i = skipSpace(i+1) - def parseDiscrete(i): - for k,v in {'true': True, 'false': False, 'null': None}.items(): - if s.startswith(k, i): - return (i+len(k), v) - raiseError('Not a boolean (or null)', i) - def parseNumber(i): - mobj = re.match('^(-?(0|[1-9][0-9]*)(\.[0-9]*)?([eE][+-]?[0-9]+)?)', s[i:]) - if mobj is None: - raiseError('Not a number', i) - nums = mobj.group(1) - if '.' in nums or 'e' in nums or 'E' in nums: - return (i+len(nums), float(nums)) - return (i+len(nums), int(nums)) - CHARMAP = {'{': parseObj, '[': parseArray, '"': parseString, 't': parseDiscrete, 'f': parseDiscrete, 'n': parseDiscrete} - def parse(i): - i = skipSpace(i) - i,res = CHARMAP.get(s[i], parseNumber)(i) - i = skipSpace(i, False) - return (i,res) - i,res = parse(0) - if i < len(s): - raise ValueError('Extra data at end of input (index ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]) + ')') - return res - -def preferredencoding(): - """Get preferred encoding. 
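# Editor's sketch, not part of the patch: YoutubeDLHandler, now defined once
# in Utils.py above, is meant to be installed on an OpenerDirector; from then
# on every urllib2 request carries std_headers and gzip/deflate responses are
# decoded transparently. A sketch of the wiring (cookie handling omitted):
import urllib2
opener = urllib2.build_opener(urllib2.ProxyHandler(), YoutubeDLHandler())
urllib2.install_opener(opener)
urllib2.urlopen('http://example.com/')  # body arrives already decompressed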
- - Returns the best encoding scheme for the system, based on - locale.getpreferredencoding() and some further tweaks. - """ - def yield_preferredencoding(): - try: - pref = locale.getpreferredencoding() - u'TEST'.encode(pref) - except: - pref = 'UTF-8' - while True: - yield pref - return yield_preferredencoding().next() - - -def htmlentity_transform(matchobj): - """Transforms an HTML entity to a Unicode character. - - This function receives a match object and is intended to be used with - the re.sub() function. - """ - entity = matchobj.group(1) - - # Known non-numeric HTML entity - if entity in htmlentitydefs.name2codepoint: - return unichr(htmlentitydefs.name2codepoint[entity]) - - # Unicode character - mobj = re.match(ur'(?u)#(x?\d+)', entity) - if mobj is not None: - numstr = mobj.group(1) - if numstr.startswith(u'x'): - base = 16 - numstr = u'0%s' % numstr - else: - base = 10 - return unichr(long(numstr, base)) - - # Unknown entity in name, return its literal representation - return (u'&%s;' % entity) - - -def sanitize_title(utitle): - """Sanitizes a video title so it could be used as part of a filename.""" - utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle) - return utitle.replace(unicode(os.sep), u'%') - - -def sanitize_open(filename, open_mode): - """Try to open the given filename, and slightly tweak it if this fails. - - Attempts to open the given filename. If this fails, it tries to change - the filename slightly, step by step, until it's either able to open it - or it fails and raises a final exception, like the standard open() - function. - - It returns the tuple (stream, definitive_file_name). - """ - try: - if filename == u'-': - if sys.platform == 'win32': - import msvcrt - msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) - return (sys.stdout, filename) - stream = open(_encodeFilename(filename), open_mode) - return (stream, filename) - except (IOError, OSError), err: - # In case of error, try to remove win32 forbidden chars - filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename) - - # An exception here should be caught in the caller - stream = open(_encodeFilename(filename), open_mode) - return (stream, filename) - - -def timeconvert(timestr): - """Convert RFC 2822 defined time string into system timestamp""" - timestamp = None - timetuple = email.utils.parsedate_tz(timestr) - if timetuple is not None: - timestamp = email.utils.mktime_tz(timetuple) - return timestamp - -def _simplify_title(title): - expr = re.compile(ur'[^\w\d_\-]+', flags=re.UNICODE) - return expr.sub(u'_', title).strip(u'_') - -def _orderedSet(iterable): - """ Remove all duplicates from the input iterable """ - res = [] - for el in iterable: - if el not in res: - res.append(el) - return res - -def _unescapeHTML(s): - """ - @param s a string (of type unicode) - """ - assert type(s) == type(u'') - - htmlParser = HTMLParser.HTMLParser() - return htmlParser.unescape(s) - -def _encodeFilename(s): - """ - @param s The name of the file (of type unicode) - """ - - assert type(s) == type(u'') - - if sys.platform == 'win32' and sys.getwindowsversion().major >= 5: - # Pass u'' directly to use Unicode APIs on Windows 2000 and up - # (Detecting Windows NT 4 is tricky because 'major >= 4' would - # match Windows 9x series as well. Besides, NT 4 is obsolete.) - return s - else: - return s.encode(sys.getfilesystemencoding(), 'ignore') - -class DownloadError(Exception): - """Download Error exception. - - This exception may be thrown by FileDownloader objects if they are not - configured to continue on errors. 
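# Editor's illustration, not part of the patch: sanitize_open, kept in
# Utils.py above, retries a failed open() with win32-forbidden characters
# replaced. The substitution it applies is:
import re
print re.sub(ur'[/<>:"\|\?\*]', u'#', u'AC/DC: Live?.flv')  # -> AC#DC# Live#.flv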
They will contain the appropriate - error message. - """ - pass - - -class SameFileError(Exception): - """Same File exception. - - This exception will be thrown by FileDownloader objects if they detect - multiple files would have to be downloaded to the same file on disk. - """ - pass - - -class PostProcessingError(Exception): - """Post Processing exception. - - This exception may be raised by PostProcessor's .run() method to - indicate an error in the postprocessing task. - """ - pass - -class MaxDownloadsReached(Exception): - """ --max-downloads limit has been reached. """ - pass - - -class UnavailableVideoError(Exception): - """Unavailable Format exception. - - This exception will be thrown when a video is requested - in a format that is not available for that video. - """ - pass - - -class ContentTooShortError(Exception): - """Content Too Short exception. - - This exception may be raised by FileDownloader objects when a file they - download is too small for what the server announced first, indicating - the connection was probably interrupted. - """ - # Both in bytes - downloaded = None - expected = None - - def __init__(self, downloaded, expected): - self.downloaded = downloaded - self.expected = expected - - -class YoutubeDLHandler(urllib2.HTTPHandler): - """Handler for HTTP requests and responses. - - This class, when installed with an OpenerDirector, automatically adds - the standard headers to every HTTP request and handles gzipped and - deflated responses from web servers. If compression is to be avoided in - a particular request, the original request in the program code only has - to include the HTTP header "Youtubedl-No-Compression", which will be - removed before making the real request. - - Part of this code was copied from: - - http://techknack.net/python-urllib2-handlers/ - - Andrew Rowls, the author of that code, agreed to release it to the - public domain. - """ - - @staticmethod - def deflate(data): - try: - return zlib.decompress(data, -zlib.MAX_WBITS) - except zlib.error: - return zlib.decompress(data) - - @staticmethod - def addinfourl_wrapper(stream, headers, url, code): - if hasattr(urllib2.addinfourl, 'getcode'): - return urllib2.addinfourl(stream, headers, url, code) - ret = urllib2.addinfourl(stream, headers, url) - ret.code = code - return ret - - def http_request(self, req): - for h in std_headers: - if h in req.headers: - del req.headers[h] - req.add_header(h, std_headers[h]) - if 'Youtubedl-no-compression' in req.headers: - if 'Accept-encoding' in req.headers: - del req.headers['Accept-encoding'] - del req.headers['Youtubedl-no-compression'] - return req - - def http_response(self, req, resp): - old_resp = resp - # gzip - if resp.headers.get('Content-encoding', '') == 'gzip': - gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r') - resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) - resp.msg = old_resp.msg - # deflate - if resp.headers.get('Content-encoding', '') == 'deflate': - gz = StringIO.StringIO(self.deflate(resp.read())) - resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) - resp.msg = old_resp.msg - return resp - - -class FileDownloader(object): - """File Downloader class. - - File downloader objects are the ones responsible of downloading the - actual video file and writing it to disk if the user has requested - it, among some other tasks. In most cases there should be one per - program. 
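# Editor's sketch, not part of the patch: the "mutual registration" described
# here, seen from the caller's side. SomeIE stands for any InfoExtractor and
# the params dict is trimmed to a single key:
fd = FileDownloader({'outtmpl': u'%(title)s-%(id)s.%(ext)s'})
fd.add_info_extractor(SomeIE())  # order matters: the first suitable IE wins
retcode = fd.download([url])     # the IE extracts and calls process_info()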
As, given a video URL, the downloader doesn't know how to - extract all the needed information, task that InfoExtractors do, it - has to pass the URL to one of them. - - For this, file downloader objects have a method that allows - InfoExtractors to be registered in a given order. When it is passed - a URL, the file downloader handles it to the first InfoExtractor it - finds that reports being able to handle it. The InfoExtractor extracts - all the information about the video or videos the URL refers to, and - asks the FileDownloader to process the video information, possibly - downloading the video. - - File downloaders accept a lot of parameters. In order not to saturate - the object constructor with arguments, it receives a dictionary of - options instead. These options are available through the params - attribute for the InfoExtractors to use. The FileDownloader also - registers itself as the downloader in charge for the InfoExtractors - that are added to it, so this is a "mutual registration". - - Available options: - - username: Username for authentication purposes. - password: Password for authentication purposes. - usenetrc: Use netrc for authentication instead. - quiet: Do not print messages to stdout. - forceurl: Force printing final URL. - forcetitle: Force printing title. - forcethumbnail: Force printing thumbnail URL. - forcedescription: Force printing description. - forcefilename: Force printing final filename. - simulate: Do not download the video files. - format: Video format code. - format_limit: Highest quality format to try. - outtmpl: Template for output names. - ignoreerrors: Do not stop on download errors. - ratelimit: Download speed limit, in bytes/sec. - nooverwrites: Prevent overwriting files. - retries: Number of times to retry for HTTP error 5xx - continuedl: Try to continue downloads if possible. - noprogress: Do not print the progress bar. - playliststart: Playlist item to start at. - playlistend: Playlist item to end at. - matchtitle: Download only matching titles. - rejecttitle: Reject downloads for matching titles. - logtostderr: Log messages to stderr instead of stdout. - consoletitle: Display progress in console window's titlebar. - nopart: Do not use temporary .part files. - updatetime: Use the Last-modified header to set output file timestamps. 
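# Editor's sketch, not part of the patch: a plausible params dict for the
# options listed here (the option list continues below). ratelimit is stored
# in bytes/sec, so a user-supplied "500k" would be converted with
# FileDownloader.parse_bytes (defined further down) before landing here:
params = {
    'outtmpl': u'%(title)s-%(id)s.%(ext)s',
    'ratelimit': FileDownloader.parse_bytes('500k'),  # -> 512000
    'retries': 10,
    'continuedl': True,  # resume .part files via a Range header (_do_download)
}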
- writedescription: Write the video description to a .description file - writeinfojson: Write the video description to a .info.json file - writesubtitles: Write the video subtitles to a .srt file - subtitleslang: Language of the subtitles to download - """ - - params = None - _ies = [] - _pps = [] - _download_retcode = None - _num_downloads = None - _screen_file = None - - def __init__(self, params): - """Create a FileDownloader object with the given options.""" - self._ies = [] - self._pps = [] - self._download_retcode = 0 - self._num_downloads = 0 - self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] - self.params = params - - @staticmethod - def format_bytes(bytes): - if bytes is None: - return 'N/A' - if type(bytes) is str: - bytes = float(bytes) - if bytes == 0.0: - exponent = 0 - else: - exponent = long(math.log(bytes, 1024.0)) - suffix = 'bkMGTPEZY'[exponent] - converted = float(bytes) / float(1024 ** exponent) - return '%.2f%s' % (converted, suffix) - - @staticmethod - def calc_percent(byte_counter, data_len): - if data_len is None: - return '---.-%' - return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0)) - - @staticmethod - def calc_eta(start, now, total, current): - if total is None: - return '--:--' - dif = now - start - if current == 0 or dif < 0.001: # One millisecond - return '--:--' - rate = float(current) / dif - eta = long((float(total) - float(current)) / rate) - (eta_mins, eta_secs) = divmod(eta, 60) - if eta_mins > 99: - return '--:--' - return '%02d:%02d' % (eta_mins, eta_secs) - - @staticmethod - def calc_speed(start, now, bytes): - dif = now - start - if bytes == 0 or dif < 0.001: # One millisecond - return '%10s' % '---b/s' - return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif)) - - @staticmethod - def best_block_size(elapsed_time, bytes): - new_min = max(bytes / 2.0, 1.0) - new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB - if elapsed_time < 0.001: - return long(new_max) - rate = bytes / elapsed_time - if rate > new_max: - return long(new_max) - if rate < new_min: - return long(new_min) - return long(rate) - - @staticmethod - def parse_bytes(bytestr): - """Parse a string indicating a byte quantity into a long integer.""" - matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) - if matchobj is None: - return None - number = float(matchobj.group(1)) - multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) - return long(round(number * multiplier)) - - def add_info_extractor(self, ie): - """Add an InfoExtractor object to the end of the list.""" - self._ies.append(ie) - ie.set_downloader(self) - - def add_post_processor(self, pp): - """Add a PostProcessor object to the end of the chain.""" - self._pps.append(pp) - pp.set_downloader(self) - - def to_screen(self, message, skip_eol=False): - """Print message to stdout if not in quiet mode.""" - assert type(message) == type(u'') - if not self.params.get('quiet', False): - terminator = [u'\n', u''][skip_eol] - output = message + terminator - - if 'b' not in self._screen_file.mode or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr - output = output.encode(preferredencoding(), 'ignore') - self._screen_file.write(output) - self._screen_file.flush() - - def to_stderr(self, message): - """Print message to stderr.""" - print >>sys.stderr, message.encode(preferredencoding()) - - def to_cons_title(self, message): - """Set console/terminal window title to message.""" - if not 
self.params.get('consoletitle', False): - return - if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): - # c_wchar_p() might not be necessary if `message` is - # already of type unicode() - ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) - elif 'TERM' in os.environ: - sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding())) - - def fixed_template(self): - """Checks if the output template is fixed.""" - return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None) - - def trouble(self, message=None): - """Determine action to take when a download problem appears. - - Depending on if the downloader has been configured to ignore - download errors or not, this method may throw an exception or - not when errors are found, after printing the message. - """ - if message is not None: - self.to_stderr(message) - if not self.params.get('ignoreerrors', False): - raise DownloadError(message) - self._download_retcode = 1 - - def slow_down(self, start_time, byte_counter): - """Sleep if the download speed is over the rate limit.""" - rate_limit = self.params.get('ratelimit', None) - if rate_limit is None or byte_counter == 0: - return - now = time.time() - elapsed = now - start_time - if elapsed <= 0.0: - return - speed = float(byte_counter) / elapsed - if speed > rate_limit: - time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit) - - def temp_name(self, filename): - """Returns a temporary filename for the given filename.""" - if self.params.get('nopart', False) or filename == u'-' or \ - (os.path.exists(_encodeFilename(filename)) and not os.path.isfile(_encodeFilename(filename))): - return filename - return filename + u'.part' - - def undo_temp_name(self, filename): - if filename.endswith(u'.part'): - return filename[:-len(u'.part')] - return filename - - def try_rename(self, old_filename, new_filename): - try: - if old_filename == new_filename: - return - os.rename(_encodeFilename(old_filename), _encodeFilename(new_filename)) - except (IOError, OSError), err: - self.trouble(u'ERROR: unable to rename file') - - def try_utime(self, filename, last_modified_hdr): - """Try to set the last-modified time of the given file.""" - if last_modified_hdr is None: - return - if not os.path.isfile(_encodeFilename(filename)): - return - timestr = last_modified_hdr - if timestr is None: - return - filetime = timeconvert(timestr) - if filetime is None: - return filetime - try: - os.utime(filename, (time.time(), filetime)) - except: - pass - return filetime - - def report_writedescription(self, descfn): - """ Report that the description file is being written """ - self.to_screen(u'[info] Writing video description to: ' + descfn) - - def report_writesubtitles(self, srtfn): - """ Report that the subtitles file is being written """ - self.to_screen(u'[info] Writing video subtitles to: ' + srtfn) - - def report_writeinfojson(self, infofn): - """ Report that the metadata file has been written """ - self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn) - - def report_destination(self, filename): - """Report destination filename.""" - self.to_screen(u'[download] Destination: ' + filename) - - def report_progress(self, percent_str, data_len_str, speed_str, eta_str): - """Report download progress.""" - if self.params.get('noprogress', False): - return - self.to_screen(u'\r[download] %s of %s at %s ETA %s' % - (percent_str, data_len_str, speed_str, eta_str), skip_eol=True) - self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' % - 
(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip())) - - def report_resuming_byte(self, resume_len): - """Report attempt to resume at given byte.""" - self.to_screen(u'[download] Resuming download at byte %s' % resume_len) - - def report_retry(self, count, retries): - """Report retry in case of HTTP error 5xx""" - self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) - - def report_file_already_downloaded(self, file_name): - """Report file has already been fully downloaded.""" - try: - self.to_screen(u'[download] %s has already been downloaded' % file_name) - except (UnicodeEncodeError), err: - self.to_screen(u'[download] The file has already been downloaded') - - def report_unable_to_resume(self): - """Report it was impossible to resume download.""" - self.to_screen(u'[download] Unable to resume') - - def report_finish(self): - """Report download finished.""" - if self.params.get('noprogress', False): - self.to_screen(u'[download] Download completed') - else: - self.to_screen(u'') - - def increment_downloads(self): - """Increment the ordinal that assigns a number to each file.""" - self._num_downloads += 1 - - def prepare_filename(self, info_dict): - """Generate the output filename.""" - try: - template_dict = dict(info_dict) - template_dict['epoch'] = unicode(long(time.time())) - template_dict['autonumber'] = unicode('%05d' % self._num_downloads) - filename = self.params['outtmpl'] % template_dict - return filename - except (ValueError, KeyError), err: - self.trouble(u'ERROR: invalid system charset or erroneous output template') - return None - - def _match_entry(self, info_dict): - """ Returns None iff the file should be downloaded """ - - title = info_dict['title'] - matchtitle = self.params.get('matchtitle', False) - if matchtitle and not re.search(matchtitle, title, re.IGNORECASE): - return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"' - rejecttitle = self.params.get('rejecttitle', False) - if rejecttitle and re.search(rejecttitle, title, re.IGNORECASE): - return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"' - return None - - def process_info(self, info_dict): - """Process a single dictionary returned by an InfoExtractor.""" - - reason = self._match_entry(info_dict) - if reason is not None: - self.to_screen(u'[download] ' + reason) - return - - max_downloads = self.params.get('max_downloads') - if max_downloads is not None: - if self._num_downloads > int(max_downloads): - raise MaxDownloadsReached() - - filename = self.prepare_filename(info_dict) - - # Forced printings - if self.params.get('forcetitle', False): - print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forceurl', False): - print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict: - print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forcedescription', False) and 'description' in info_dict: - print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forcefilename', False) and filename is not None: - print filename.encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forceformat', False): - print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace') - - # Do nothing else if in simulate mode - if self.params.get('simulate', False): - 
return - - if filename is None: - return - - try: - dn = os.path.dirname(_encodeFilename(filename)) - if dn != '' and not os.path.exists(dn): # dn is already encoded - os.makedirs(dn) - except (OSError, IOError), err: - self.trouble(u'ERROR: unable to create directory ' + unicode(err)) - return - - if self.params.get('writedescription', False): - try: - descfn = filename + u'.description' - self.report_writedescription(descfn) - descfile = open(_encodeFilename(descfn), 'wb') - try: - descfile.write(info_dict['description'].encode('utf-8')) - finally: - descfile.close() - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write description file ' + descfn) - return - - if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: - # subtitles download errors are already managed as troubles in relevant IE - # that way it will silently go on when used with unsupporting IE - try: - srtfn = filename.rsplit('.', 1)[0] + u'.srt' - self.report_writesubtitles(srtfn) - srtfile = open(_encodeFilename(srtfn), 'wb') - try: - srtfile.write(info_dict['subtitles'].encode('utf-8')) - finally: - srtfile.close() - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) - return - - if self.params.get('writeinfojson', False): - infofn = filename + u'.info.json' - self.report_writeinfojson(infofn) - try: - json.dump - except (NameError,AttributeError): - self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.') - return - try: - infof = open(_encodeFilename(infofn), 'wb') - try: - json_info_dict = dict((k,v) for k,v in info_dict.iteritems() if not k in ('urlhandle',)) - json.dump(json_info_dict, infof) - finally: - infof.close() - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn) - return - - if not self.params.get('skip_download', False): - if self.params.get('nooverwrites', False) and os.path.exists(_encodeFilename(filename)): - success = True - else: - try: - success = self._do_download(filename, info_dict) - except (OSError, IOError), err: - raise UnavailableVideoError - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self.trouble(u'ERROR: unable to download video data: %s' % str(err)) - return - except (ContentTooShortError, ), err: - self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) - return - - if success: - try: - self.post_process(filename, info_dict) - except (PostProcessingError), err: - self.trouble(u'ERROR: postprocessing: %s' % str(err)) - return - - def download(self, url_list): - """Download a given list of URLs.""" - if len(url_list) > 1 and self.fixed_template(): - raise SameFileError(self.params['outtmpl']) - - for url in url_list: - suitable_found = False - for ie in self._ies: - # Go to next InfoExtractor if not suitable - if not ie.suitable(url): - continue - - # Suitable InfoExtractor found - suitable_found = True - - # Extract information from URL and process it - ie.extract(url) - - # Suitable InfoExtractor had been found; go to next URL - break - - if not suitable_found: - self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url) - - return self._download_retcode - - def post_process(self, filename, ie_info): - """Run the postprocessing chain on the given file.""" - info = dict(ie_info) - info['filepath'] = filename - for pp in self._pps: - info = pp.run(info) - if info is None: - break - - def 
_download_with_rtmpdump(self, filename, url, player_url): - self.report_destination(filename) - tmpfilename = self.temp_name(filename) - - # Check for rtmpdump first - try: - subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT) - except (OSError, IOError): - self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run') - return False - - # Download using rtmpdump. rtmpdump returns exit code 2 when - # the connection was interrupted and resuming appears to be - # possible. This is part of rtmpdump's normal usage, AFAIK. - basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename] - args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)] - if self.params.get('verbose', False): - try: - import pipes - shell_quote = lambda args: ' '.join(map(pipes.quote, args)) - except ImportError: - shell_quote = repr - self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args)) - retval = subprocess.call(args) - while retval == 2 or retval == 1: - prevsize = os.path.getsize(_encodeFilename(tmpfilename)) - self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True) - time.sleep(5.0) # This seems to be needed - retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1]) - cursize = os.path.getsize(_encodeFilename(tmpfilename)) - if prevsize == cursize and retval == 1: - break - # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those - if prevsize == cursize and retval == 2 and cursize > 1024: - self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.') - retval = 0 - break - if retval == 0: - self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(_encodeFilename(tmpfilename))) - self.try_rename(tmpfilename, filename) - return True - else: - self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval) - return False - - def _do_download(self, filename, info_dict): - url = info_dict['url'] - player_url = info_dict.get('player_url', None) - - # Check file already present - if self.params.get('continuedl', False) and os.path.isfile(_encodeFilename(filename)) and not self.params.get('nopart', False): - self.report_file_already_downloaded(filename) - return True - - # Attempt to download using rtmpdump - if url.startswith('rtmp'): - return self._download_with_rtmpdump(filename, url, player_url) - - tmpfilename = self.temp_name(filename) - stream = None - - # Do not include the Accept-Encoding header - headers = {'Youtubedl-no-compression': 'True'} - basic_request = urllib2.Request(url, None, headers) - request = urllib2.Request(url, None, headers) - - # Establish possible resume length - if os.path.isfile(_encodeFilename(tmpfilename)): - resume_len = os.path.getsize(_encodeFilename(tmpfilename)) - else: - resume_len = 0 - - open_mode = 'wb' - if resume_len != 0: - if self.params.get('continuedl', False): - self.report_resuming_byte(resume_len) - request.add_header('Range','bytes=%d-' % resume_len) - open_mode = 'ab' - else: - resume_len = 0 - - count = 0 - retries = self.params.get('retries', 0) - while count <= retries: - # Establish connection - try: - if count == 0 and 'urlhandle' in info_dict: - data = info_dict['urlhandle'] - break # reuse the handle the IE already opened instead of opening the URL again - data = urllib2.urlopen(request) - break - except (urllib2.HTTPError, ), err: - if (err.code < 500 or err.code >= 600) and err.code != 416: - # Unexpected HTTP error - raise - elif err.code == 416: - # Unable to resume (requested range not
satisfiable) - try: - # Open the connection again without the range header - data = urllib2.urlopen(basic_request) - content_length = data.info()['Content-Length'] - except (urllib2.HTTPError, ), err: - if err.code < 500 or err.code >= 600: - raise - else: - # Examine the reported length - if (content_length is not None and - (resume_len - 100 < long(content_length) < resume_len + 100)): - # The file had already been fully downloaded. - # Explanation to the above condition: in issue #175 it was revealed that - # YouTube sometimes adds or removes a few bytes from the end of the file, - # changing the file size slightly and causing problems for some users. So - # I decided to implement a suggested change and consider the file - # completely downloaded if the file size differs less than 100 bytes from - # the one in the hard drive. - self.report_file_already_downloaded(filename) - self.try_rename(tmpfilename, filename) - return True - else: - # The length does not match, we start the download over - self.report_unable_to_resume() - open_mode = 'wb' - break - # Retry - count += 1 - if count <= retries: - self.report_retry(count, retries) - - if count > retries: - self.trouble(u'ERROR: giving up after %s retries' % retries) - return False - - data_len = data.info().get('Content-length', None) - if data_len is not None: - data_len = long(data_len) + resume_len - data_len_str = self.format_bytes(data_len) - byte_counter = 0 + resume_len - block_size = 1024 - start = time.time() - while True: - # Download and write - before = time.time() - data_block = data.read(block_size) - after = time.time() - if len(data_block) == 0: - break - byte_counter += len(data_block) - - # Open file just in time - if stream is None: - try: - (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) - assert stream is not None - filename = self.undo_temp_name(tmpfilename) - self.report_destination(filename) - except (OSError, IOError), err: - self.trouble(u'ERROR: unable to open for writing: %s' % str(err)) - return False - try: - stream.write(data_block) - except (IOError, OSError), err: - self.trouble(u'\nERROR: unable to write data: %s' % str(err)) - return False - block_size = self.best_block_size(after - before, len(data_block)) - - # Progress message - speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len) - if data_len is None: - self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA') - else: - percent_str = self.calc_percent(byte_counter, data_len) - eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len) - self.report_progress(percent_str, data_len_str, speed_str, eta_str) - - # Apply rate limit - self.slow_down(start, byte_counter - resume_len) - - if stream is None: - self.trouble(u'\nERROR: Did not get any data blocks') - return False - stream.close() - self.report_finish() - if data_len is not None and byte_counter != data_len: - raise ContentTooShortError(byte_counter, long(data_len)) - self.try_rename(tmpfilename, filename) - - # Update file modification time - if self.params.get('updatetime', True): - info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None)) - - return True - - -class InfoExtractor(object): - """Information Extractor class. - - Information extractors are the classes that, given a URL, extract - information from the video (or videos) the URL refers to. This - information includes the real video URL, the video title and simplified - title, author and others. 
The information is stored in a dictionary - which is then passed to the FileDownloader. The FileDownloader - processes this information possibly downloading the video to the file - system, among other possible outcomes. The dictionaries must include - the following fields: - - id: Video identifier. - url: Final video URL. - uploader: Nickname of the video uploader. - title: Literal title. - stitle: Simplified title. - ext: Video filename extension. - format: Video format. - player_url: SWF Player URL (may be None). - - The following fields are optional. Their primary purpose is to allow - youtube-dl to serve as the backend for a video search function, such - as the one in youtube2mp3. They are only used when their respective - forced printing functions are called: - - thumbnail: Full URL to a video thumbnail image. - description: One-line video description. - - Subclasses of this one should re-define the _real_initialize() and - _real_extract() methods and define a _VALID_URL regexp. - Probably, they should also be added to the list of extractors. - """ - - _ready = False - _downloader = None - - def __init__(self, downloader=None): - """Constructor. Receives an optional downloader.""" - self._ready = False - self.set_downloader(downloader) - - def suitable(self, url): - """Receives a URL and returns True if suitable for this IE.""" - return re.match(self._VALID_URL, url) is not None - - def initialize(self): - """Initializes an instance (authentication, etc).""" - if not self._ready: - self._real_initialize() - self._ready = True - - def extract(self, url): - """Extracts URL information and returns it in list of dicts.""" - self.initialize() - return self._real_extract(url) - - def set_downloader(self, downloader): - """Sets the downloader for this IE.""" - self._downloader = downloader - - def _real_initialize(self): - """Real initialization process. Redefine in subclasses.""" - pass - - def _real_extract(self, url): - """Real extraction process. 
Redefine in subclasses.""" - pass - - -class YoutubeIE(InfoExtractor): - """Information extractor for youtube.com.""" - - _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$' - _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' - _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en' - _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' - _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' - _NETRC_MACHINE = 'youtube' - # Listed in order of quality - _available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13'] - _available_formats_prefer_free = ['38', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13'] - _video_extensions = { - '13': '3gp', - '17': 'mp4', - '18': 'mp4', - '22': 'mp4', - '37': 'mp4', - '38': 'video', # You actually don't know if this will be MOV, AVI or whatever - '43': 'webm', - '44': 'webm', - '45': 'webm', - } - _video_dimensions = { - '5': '240x400', - '6': '???', - '13': '???', - '17': '144x176', - '18': '360x640', - '22': '720x1280', - '34': '360x640', - '35': '480x854', - '37': '1080x1920', - '38': '3072x4096', - '43': '360x640', - '44': '480x854', - '45': '720x1280', - } - IE_NAME = u'youtube' - - def report_lang(self): - """Report attempt to set language.""" - self._downloader.to_screen(u'[youtube] Setting language') - - def report_login(self): - """Report attempt to log in.""" - self._downloader.to_screen(u'[youtube] Logging in') - - def report_age_confirmation(self): - """Report attempt to confirm age.""" - self._downloader.to_screen(u'[youtube] Confirming age') - - def report_video_webpage_download(self, video_id): - """Report attempt to download video webpage.""" - self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id) - - def report_video_info_webpage_download(self, video_id): - """Report attempt to download video info webpage.""" - self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id) - - def report_video_subtitles_download(self, video_id): - """Report attempt to download video subtitles.""" - self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id) - - def report_information_extraction(self, video_id): - """Report attempt to extract video information.""" - self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id) - - def report_unavailable_format(self, video_id, format): - """Report that the requested video format is not available.""" - self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format)) - - def report_rtmp_download(self): - """Indicate the download will use the RTMP protocol.""" - self._downloader.to_screen(u'[youtube] RTMP download detected') - - def _closed_captions_xml_to_srt(self, xml_string): - srt = '' - texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE) - # TODO parse xml instead of regex - for n, (start, dur_tag, dur, caption) in enumerate(texts): - if not dur: dur = '4' - start = float(start) - end = start + float(dur) - start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000) - end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000) - caption = re.sub(ur'(?u)&(.+?);', htmlentity_transform, caption) - caption = 
re.sub(ur'(?u)&(.+?);', htmlentity_transform, caption) # double cycle, intentional - srt += str(n+1) + '\n' # SRT indices are 1-based - srt += start + ' --> ' + end + '\n' - srt += caption + '\n\n' - return srt - - def _print_formats(self, formats): - print 'Available formats:' - for x in formats: - print '%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')) - - def _real_initialize(self): - if self._downloader is None: - return - - username = None - password = None - downloader_params = self._downloader.params - - # Attempt to use provided username and password or .netrc data - if downloader_params.get('username', None) is not None: - username = downloader_params['username'] - password = downloader_params['password'] - elif downloader_params.get('usenetrc', False): - try: - info = netrc.netrc().authenticators(self._NETRC_MACHINE) - if info is not None: - username = info[0] - password = info[2] - else: - raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) - except (IOError, netrc.NetrcParseError), err: - self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err)) - return - - # Set language - request = urllib2.Request(self._LANG_URL) - try: - self.report_lang() - urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err)) - return - - # No authentication to be performed - if username is None: - return - - # Log in - login_form = { - 'current_form': 'loginForm', - 'next': '/', - 'action_login': 'Log In', - 'username': username, - 'password': password, - } - request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form)) - try: - self.report_login() - login_results = urllib2.urlopen(request).read() - if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None: - self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password') - return - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err)) - return - - # Confirm age - age_form = { - 'next_url': '/', - 'action_confirm': 'Confirm', - } - request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form)) - try: - self.report_age_confirmation() - age_results = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err)) - return - - def _real_extract(self, url): - # Extract the original video URL from a redirecting URL (e.g. age verification) using the next_url parameter - mobj = re.search(self._NEXT_URL_RE, url) - if mobj: - url = 'http://www.youtube.com/' + urllib.unquote(mobj.group(1)).lstrip('/') - - # Extract video id from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - video_id = mobj.group(2) - - # Get video webpage - self.report_video_webpage_download(video_id) - request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id) - try: - video_webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - # Attempt to extract SWF player URL - mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) - if mobj is not None: - player_url 
= re.sub(r'\\(.)', r'\1', mobj.group(1)) - else: - player_url = None - - # Get video info - self.report_video_info_webpage_download(video_id) - for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']: - video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' - % (video_id, el_type)) - request = urllib2.Request(video_info_url) - try: - video_info_webpage = urllib2.urlopen(request).read() - video_info = parse_qs(video_info_webpage) - if 'token' in video_info: - break - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err)) - return - if 'token' not in video_info: - if 'reason' in video_info: - self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8')) - else: - self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason') - return - - # Start extracting information - self.report_information_extraction(video_id) - - # uploader - if 'author' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') - return - video_uploader = urllib.unquote_plus(video_info['author'][0]) - - # title - if 'title' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract video title') - return - video_title = urllib.unquote_plus(video_info['title'][0]) - video_title = video_title.decode('utf-8') - video_title = sanitize_title(video_title) - - # simplified title - simple_title = _simplify_title(video_title) - - # thumbnail image - if 'thumbnail_url' not in video_info: - self._downloader.trouble(u'WARNING: unable to extract video thumbnail') - video_thumbnail = '' - else: # don't panic if we can't find it - video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0]) - - # upload date - upload_date = u'NA' - mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL) - if mobj is not None: - upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) - format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y'] - for expression in format_expressions: - try: - upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d') - except: - pass - - # description - try: - lxml.etree - except NameError: - video_description = u'No description available.' 
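For illustration, the upload-date normalization above reduces to this standalone sketch (the helper name normalize_upload_date is ours, not part of the patch; it mirrors the re.sub cleanup and the strptime format list):

    import datetime
    import re

    def normalize_upload_date(raw_date):
        # Collapse '/', ',' and '-' separators, then squeeze whitespace:
        # 'Mar 25, 2012' -> 'Mar 25 2012'
        cleaned = ' '.join(re.sub(r'[/,-]', r' ', raw_date).split())
        for expression in ('%d %B %Y', '%B %d %Y', '%b %d %Y'):
            try:
                return datetime.datetime.strptime(cleaned, expression).strftime('%Y%m%d')
            except ValueError:
                pass
        return cleaned  # no known format matched; keep the cleaned string

    print(normalize_upload_date('Mar 25, 2012'))  # prints 20120325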
- mobj = re.search(r'<meta name="description" content="(.*?)">', video_webpage) - if mobj is not None: - video_description = mobj.group(1).decode('utf-8') - else: - html_parser = lxml.etree.HTMLParser(encoding='utf-8') - vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser) - video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()')) - # TODO use another parser - - # closed captions - video_subtitles = None - if self._downloader.params.get('writesubtitles', False): - self.report_video_subtitles_download(video_id) - request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id) - try: - srt_list = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err)) - else: - srt_lang_list = re.findall(r'lang_code="([\w\-]+)"', srt_list) - if srt_lang_list: - if self._downloader.params.get('subtitleslang', False): - srt_lang = self._downloader.params.get('subtitleslang') - elif 'en' in srt_lang_list: - srt_lang = 'en' - else: - srt_lang = srt_lang_list[0] - if not srt_lang in srt_lang_list: - self._downloader.trouble(u'WARNING: no closed captions found in the specified language') - else: - request = urllib2.Request('http://video.google.com/timedtext?hl=en&lang=%s&v=%s' % (srt_lang, video_id)) - try: - srt_xml = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err)) - else: - video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8')) - else: - self._downloader.trouble(u'WARNING: video has no closed captions') - - # token - video_token = urllib.unquote_plus(video_info['token'][0]) - - # Decide which formats to download - req_format = self._downloader.params.get('format', None) - - if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): - self.report_rtmp_download() - video_url_list = [(None, video_info['conn'][0])] - elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1: - url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',') - url_data = [parse_qs(uds) for uds in url_data_strs] - url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data) - url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data) - - format_limit = self._downloader.params.get('format_limit', None) - available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats - if format_limit is not None and format_limit in available_formats: - format_list = available_formats[available_formats.index(format_limit):] - else: - format_list = available_formats - existing_formats = [x for x in format_list if x in url_map] - if len(existing_formats) == 0: - self._downloader.trouble(u'ERROR: no known formats available for video') - return - if self._downloader.params.get('listformats', None): - self._print_formats(existing_formats) - return - if req_format is None or req_format == 'best': - video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality - elif req_format == 'worst': - video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality - elif req_format in ('-1', 'all'): - video_url_list = [(f, url_map[f]) for f in 
existing_formats] # All formats - else: - # Specific formats. We pick the first in a slash-delimited sequence. - # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. - req_formats = req_format.split('/') - video_url_list = None - for rf in req_formats: - if rf in url_map: - video_url_list = [(rf, url_map[rf])] - break - if video_url_list is None: - self._downloader.trouble(u'ERROR: requested format not available') - return - else: - self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info') - return - - for format_param, video_real_url in video_url_list: - # At this point we have a new video - self._downloader.increment_downloads() - - # Extension - video_extension = self._video_extensions.get(format_param, 'flv') - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': video_real_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': upload_date, - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), - 'thumbnail': video_thumbnail.decode('utf-8'), - 'description': video_description, - 'player_url': player_url, - 'subtitles': video_subtitles - }) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class MetacafeIE(InfoExtractor): - """Information Extractor for metacafe.com.""" - - _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*' - _DISCLAIMER = 'http://www.metacafe.com/family_filter/' - _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' - _youtube_ie = None - IE_NAME = u'metacafe' - - def __init__(self, youtube_ie, downloader=None): - InfoExtractor.__init__(self, downloader) - self._youtube_ie = youtube_ie - - def report_disclaimer(self): - """Report disclaimer retrieval.""" - self._downloader.to_screen(u'[metacafe] Retrieving disclaimer') - - def report_age_confirmation(self): - """Report attempt to confirm age.""" - self._downloader.to_screen(u'[metacafe] Confirming age') - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id) - - def _real_initialize(self): - # Retrieve disclaimer - request = urllib2.Request(self._DISCLAIMER) - try: - self.report_disclaimer() - disclaimer = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err)) - return - - # Confirm age - disclaimer_form = { - 'filters': '0', - 'submit': "Continue - I'm over 18", - } - request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form)) - try: - self.report_age_confirmation() - disclaimer = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err)) - return - - def _real_extract(self, url): - # Extract id and simplified title from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - video_id = mobj.group(1) - - # Check 
if video comes from YouTube - mobj2 = re.match(r'^yt-(.*)$', video_id) - if mobj2 is not None: - self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1)) - return - - # At this point we have a new video - self._downloader.increment_downloads() - - simple_title = mobj.group(2).decode('utf-8') - - # Retrieve video webpage to extract further information - request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err)) - return - - # Extract URL, uploader and title from webpage - self.report_extraction(video_id) - mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) - if mobj is not None: - mediaURL = urllib.unquote(mobj.group(1)) - video_extension = mediaURL[-3:] - - # Extract gdaKey if available - mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) - if mobj is None: - video_url = mediaURL - else: - gdaKey = mobj.group(1) - video_url = '%s?__gda__=%s' % (mediaURL, gdaKey) - else: - mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - vardict = parse_qs(mobj.group(1)) - if 'mediaData' not in vardict: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0]) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mediaURL = mobj.group(1).replace('\\/', '/') - video_extension = mediaURL[-3:] - video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2)) - - mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') - video_title = sanitize_title(video_title) - - mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') - return - video_uploader = mobj.group(1) - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': u'NA', - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class DailymotionIE(InfoExtractor): - """Information Extractor for Dailymotion""" - - _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)' - IE_NAME = u'dailymotion' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id) - - def _real_extract(self, url): - # Extract id and simplified title from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - 
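As an aside, the yt- prefix check in MetacafeIE._real_extract above amounts to the following dispatch; a minimal sketch (the function name and sample id are illustrative only):

    import re

    def youtube_url_for_metacafe_id(video_id):
        # Metacafe ids of the form 'yt-<id>' wrap YouTube videos; rebuild a
        # watch URL so the YouTube extractor can handle them instead.
        mobj = re.match(r'^yt-(.*)$', video_id)
        if mobj is not None:
            return 'http://www.youtube.com/watch?v=%s' % mobj.group(1)
        return None  # native Metacafe video: continue normal extraction

    print(youtube_url_for_metacafe_id('yt-dQw4w9WgXcQ'))  # http://www.youtube.com/watch?v=dQw4w9WgXcQ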
# At this point we have a new video - self._downloader.increment_downloads() - video_id = mobj.group(1) - - video_extension = 'flv' - - # Retrieve video webpage to extract further information - request = urllib2.Request(url) - request.add_header('Cookie', 'family_filter=off') - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err)) - return - - # Extract URL, uploader and title from webpage - self.report_extraction(video_id) - mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - sequence = urllib.unquote(mobj.group(1)) - mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '') - - # if needed, prepend http://www.dailymotion.com/ when the URL is relative - - video_url = mediaURL - - mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = _unescapeHTML(mobj.group('title').decode('utf-8')) - video_title = sanitize_title(video_title) - simple_title = _simplify_title(video_title) - - mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') - return - video_uploader = mobj.group(1) - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': u'NA', - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class GoogleIE(InfoExtractor): - """Information extractor for video.google.com.""" - - _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*' - IE_NAME = u'video.google' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id) - - def _real_extract(self, url): - # Extract id from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - # At this point we have a new video - self._downloader.increment_downloads() - video_id = mobj.group(1) - - video_extension = 'mp4' - - # Retrieve video webpage to extract further information - request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - # Extract URL, uploader, and title from webpage - self.report_extraction(video_id) - mobj = 
re.search(r"download_url:'([^']+)'", webpage) - if mobj is None: - video_extension = 'flv' - mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mediaURL = urllib.unquote(mobj.group(1)) - mediaURL = mediaURL.replace('\\x3d', '\x3d') - mediaURL = mediaURL.replace('\\x26', '\x26') - - video_url = mediaURL - - mobj = re.search(r'<title>(.*)</title>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') - video_title = sanitize_title(video_title) - simple_title = _simplify_title(video_title) - - # Extract video description - mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video description') - return - video_description = mobj.group(1).decode('utf-8') - if not video_description: - video_description = 'No description available.' - - # Extract video thumbnail - if self._downloader.params.get('forcethumbnail', False): - request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id))) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') - return - video_thumbnail = mobj.group(1) - else: # we need something to pass to process_info - video_thumbnail = '' - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': u'NA', - 'upload_date': u'NA', - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class PhotobucketIE(InfoExtractor): - """Information extractor for photobucket.com.""" - - _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)' - IE_NAME = u'photobucket' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id) - - def _real_extract(self, url): - # Extract id from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - # At this point we have a new video - self._downloader.increment_downloads() - video_id = mobj.group(1) - - video_extension = 'flv' - - # Retrieve video webpage to extract further information - request = urllib2.Request(url) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - # Extract URL, uploader, and title from webpage - self.report_extraction(video_id) - mobj = re.search(r'<link rel="video_src" href=".*\?file=(.+\.flv)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to 
extract media URL') - return - mediaURL = urllib.unquote(mobj.group(1)) - - video_url = mediaURL - - mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') - video_title = sanitize_title(video_title) - simple_title = _simplify_title(video_title) - - video_uploader = mobj.group(2).decode('utf-8') - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader, - 'upload_date': u'NA', - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class YahooIE(InfoExtractor): - """Information extractor for video.yahoo.com.""" - - # _VALID_URL matches all Yahoo! Video URLs - # _VPAGE_URL matches only the extractable '/watch/' URLs - _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?' - _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?' - IE_NAME = u'video.yahoo' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id) - - def _real_extract(self, url, new_video=True): - # Extract ID from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - # At this point we have a new video - self._downloader.increment_downloads() - video_id = mobj.group(2) - video_extension = 'flv' - - # Rewrite valid but non-extractable URLs as - # extractable English language /watch/ URLs - if re.match(self._VPAGE_URL, url) is None: - request = urllib2.Request(url) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - mobj = re.search(r'\("id", "([0-9]+)"\);', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: Unable to extract id field') - return - yahoo_id = mobj.group(1) - - mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: Unable to extract vid field') - return - yahoo_vid = mobj.group(1) - - url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id) - return self._real_extract(url, new_video=False) - - # Retrieve video webpage to extract further information - request = urllib2.Request(url) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - # Extract uploader and title from webpage - self.report_extraction(video_id) - mobj = re.search(r'<meta name="title" content="(.*)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video title') - return - video_title = mobj.group(1).decode('utf-8') - simple_title = 
_simplify_title(video_title) - - mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(?:people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video uploader') - return - video_uploader = mobj.group(1).decode('utf-8') - - # Extract video thumbnail - mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') - return - video_thumbnail = mobj.group(1).decode('utf-8') - - # Extract video description - mobj = re.search(r'<meta name="description" content="(.*)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video description') - return - video_description = mobj.group(1).decode('utf-8') - if not video_description: - video_description = 'No description available.' - - # Extract video height and width - mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video height') - return - yv_video_height = mobj.group(1) - - mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video width') - return - yv_video_width = mobj.group(1) - - # Retrieve video playlist to extract media URL - # I'm not completely sure what all these options are, but we - # seem to need most of them, otherwise the server sends a 401. - yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents - yv_bitrate = '700' # according to Wikipedia this is hard-coded - request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id + - '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height + - '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797') - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - # Extract media URL from playlist XML - mobj = re.search(r'', webpage, re.MULTILINE) - if mobj is not None: - video_description = mobj.group(1) - else: - html_parser = lxml.etree.HTMLParser() - vwebpage_doc = lxml.etree.parse(StringIO.StringIO(webpage), html_parser) - video_description = u''.join(vwebpage_doc.xpath('id("description")//text()')).strip() - # TODO use another parser - - # Extract upload date - video_upload_date = u'NA' - mobj = re.search(r'', webpage) - if mobj is not None: - video_upload_date = mobj.group(1) - - # Vimeo specific: extract request signature and timestamp - sig = config['request']['signature'] - timestamp = config['request']['timestamp'] - - # Vimeo specific: extract video codec and quality information - # TODO bind to format param - codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')] - for codec in codecs: - if codec[0] in config["video"]["files"]: - video_codec = codec[0] - video_extension = codec[1] - if 'hd' in config["video"]["files"][codec[0]]: quality = 'hd' - else: quality = 'sd' - break - else: - self._downloader.trouble(u'ERROR: no known codec found') - return - - video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ - %(video_id, sig, timestamp, quality, video_codec.upper()) - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id, - 'url': video_url, - 'uploader': video_uploader, - 'upload_date': video_upload_date, - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension, - 'thumbnail': video_thumbnail, - 'description': 
video_description, - 'player_url': None, - }) - except UnavailableVideoError: - self._downloader.trouble(u'ERROR: unable to download video') - - -class GenericIE(InfoExtractor): - """Generic last-resort information extractor.""" - - _VALID_URL = r'.*' - IE_NAME = u'generic' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.') - self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id) - - def report_following_redirect(self, new_url): - """Report information extraction.""" - self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url) - - def _test_redirect(self, url): - """Check if it is a redirect, like url shorteners, in case restart chain.""" - class HeadRequest(urllib2.Request): - def get_method(self): - return "HEAD" - - class HEADRedirectHandler(urllib2.HTTPRedirectHandler): - """ - Subclass the HTTPRedirectHandler to make it use our - HeadRequest also on the redirected URL - """ - def redirect_request(self, req, fp, code, msg, headers, newurl): - if code in (301, 302, 303, 307): - newurl = newurl.replace(' ', '%20') - newheaders = dict((k,v) for k,v in req.headers.items() - if k.lower() not in ("content-length", "content-type")) - return HeadRequest(newurl, - headers=newheaders, - origin_req_host=req.get_origin_req_host(), - unverifiable=True) - else: - raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp) - - class HTTPMethodFallback(urllib2.BaseHandler): - """ - Fallback to GET if HEAD is not allowed (405 HTTP error) - """ - def http_error_405(self, req, fp, code, msg, headers): - fp.read() - fp.close() - - newheaders = dict((k,v) for k,v in req.headers.items() - if k.lower() not in ("content-length", "content-type")) - return self.parent.open(urllib2.Request(req.get_full_url(), - headers=newheaders, - origin_req_host=req.get_origin_req_host(), - unverifiable=True)) - - # Build our opener - opener = urllib2.OpenerDirector() - for handler in [urllib2.HTTPHandler, urllib2.HTTPDefaultErrorHandler, - HTTPMethodFallback, HEADRedirectHandler, - urllib2.HTTPErrorProcessor, urllib2.HTTPSHandler]: - opener.add_handler(handler()) - - response = opener.open(HeadRequest(url)) - new_url = response.geturl() - - if url == new_url: return False - - self.report_following_redirect(new_url) - self._downloader.download([new_url]) - return True - - def _real_extract(self, url): - if self._test_redirect(url): return - - # At this point we have a new video - self._downloader.increment_downloads() - - video_id = url.split('/')[-1] - request = urllib2.Request(url) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - except ValueError, err: - # since this is the last-resort InfoExtractor, if - # this error is thrown, it'll be thrown here - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - self.report_extraction(video_id) - # Start with something easy: JW Player in SWFObject - mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage) - if mobj is None: - # 
Broaden the search a little bit - mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - # It's possible that one of the regexes - # matched, but returned an empty group: - if mobj.group(1) is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - video_url = urllib.unquote(mobj.group(1)) - video_id = os.path.basename(video_url) - - # here's a fun little line of code for you: - video_extension = os.path.splitext(video_id)[1][1:] - video_id = os.path.splitext(video_id)[0] - - # it's tempting to parse this further, but you would - # have to take into account all the variations like - # Video Title - Site Name - # Site Name | Video Title - # Video Title - Tagline | Site Name - # and so on and so forth; it's just not practical - mobj = re.search(r'(.*)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') - video_title = sanitize_title(video_title) - simple_title = _simplify_title(video_title) - - # video uploader is domain name - mobj = re.match(r'(?:https?://)?([^/]*)/.*', url) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_uploader = mobj.group(1).decode('utf-8') - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader, - 'upload_date': u'NA', - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class YoutubeSearchIE(InfoExtractor): - """Information Extractor for YouTube search queries.""" - _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+' - _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc' - _youtube_ie = None - _max_youtube_results = 1000 - IE_NAME = u'youtube:search' - - def __init__(self, youtube_ie, downloader=None): - InfoExtractor.__init__(self, downloader) - self._youtube_ie = youtube_ie - - def report_download_page(self, query, pagenum): - """Report attempt to download playlist page with given number.""" - query = query.decode(preferredencoding()) - self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum)) - - def _real_initialize(self): - self._youtube_ie.initialize() - - def _real_extract(self, query): - mobj = re.match(self._VALID_URL, query) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) - return - - prefix, query = query.split(':') - prefix = prefix[8:] - query = query.encode('utf-8') - if prefix == '': - self._download_n_results(query, 1) - return - elif prefix == 'all': - self._download_n_results(query, self._max_youtube_results) - return - else: - try: - n = long(prefix) - if n <= 0: - self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) - return - elif n > self._max_youtube_results: - self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n)) - n = self._max_youtube_results - self._download_n_results(query, n) - return - except ValueError: # parsing prefix as integer fails - self._download_n_results(query, 1) - return - - def _download_n_results(self, query, n): - 
"""Downloads a specified number of results for a query""" - - video_ids = [] - pagenum = 0 - limit = n - - while (50 * pagenum) < limit: - self.report_download_page(query, pagenum+1) - result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1) - request = urllib2.Request(result_url) - try: - data = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err)) - return - api_response = json.loads(data)['data'] - - new_ids = list(video['id'] for video in api_response['items']) - video_ids += new_ids - - limit = min(n, api_response['totalItems']) - pagenum += 1 - - if len(video_ids) > n: - video_ids = video_ids[:n] - for id in video_ids: - self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id) - return - - -class GoogleSearchIE(InfoExtractor): - """Information Extractor for Google Video search queries.""" - _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+' - _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en' - _VIDEO_INDICATOR = r' self._max_google_results: - self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n)) - n = self._max_google_results - self._download_n_results(query, n) - return - except ValueError: # parsing prefix as integer fails - self._download_n_results(query, 1) - return - - def _download_n_results(self, query, n): - """Downloads a specified number of results for a query""" - - video_ids = [] - pagenum = 0 - - while True: - self.report_download_page(query, pagenum) - result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum*10) - request = urllib2.Request(result_url) - try: - page = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) - return - - # Extract video identifiers - for mobj in re.finditer(self._VIDEO_INDICATOR, page): - video_id = mobj.group(1) - if video_id not in video_ids: - video_ids.append(video_id) - if len(video_ids) == n: - # Specified n videos reached - for id in video_ids: - self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id) - return - - if re.search(self._MORE_PAGES_INDICATOR, page) is None: - for id in video_ids: - self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id) - return - - pagenum = pagenum + 1 - - -class YahooSearchIE(InfoExtractor): - """Information Extractor for Yahoo! 
Video search queries.""" - _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+' - _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s' - _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"' - _MORE_PAGES_INDICATOR = r'\s*Next' - _yahoo_ie = None - _max_yahoo_results = 1000 - IE_NAME = u'video.yahoo:search' - - def __init__(self, yahoo_ie, downloader=None): - InfoExtractor.__init__(self, downloader) - self._yahoo_ie = yahoo_ie - - def report_download_page(self, query, pagenum): - """Report attempt to download playlist page with given number.""" - query = query.decode(preferredencoding()) - self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum)) - - def _real_initialize(self): - self._yahoo_ie.initialize() - - def _real_extract(self, query): - mobj = re.match(self._VALID_URL, query) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) - return - - prefix, query = query.split(':') - prefix = prefix[8:] - query = query.encode('utf-8') - if prefix == '': - self._download_n_results(query, 1) - return - elif prefix == 'all': - self._download_n_results(query, self._max_yahoo_results) - return - else: - try: - n = long(prefix) - if n <= 0: - self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) - return - elif n > self._max_yahoo_results: - self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n)) - n = self._max_yahoo_results - self._download_n_results(query, n) - return - except ValueError: # parsing prefix as integer fails - self._download_n_results(query, 1) - return - - def _download_n_results(self, query, n): - """Downloads a specified number of results for a query""" - - video_ids = [] - already_seen = set() - pagenum = 1 - - while True: - self.report_download_page(query, pagenum) - result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum) - request = urllib2.Request(result_url) - try: - page = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) - return - - # Extract video identifiers - for mobj in re.finditer(self._VIDEO_INDICATOR, page): - video_id = mobj.group(1) - if video_id not in already_seen: - video_ids.append(video_id) - already_seen.add(video_id) - if len(video_ids) == n: - # Specified n videos reached - for id in video_ids: - self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id) - return - - if re.search(self._MORE_PAGES_INDICATOR, page) is None: - for id in video_ids: - self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id) - return - - pagenum = pagenum + 1 - - -class YoutubePlaylistIE(InfoExtractor): - """Information Extractor for YouTube playlists.""" - - _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*' - _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en' - _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&' - _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*' - _youtube_ie = None - IE_NAME = u'youtube:playlist' - - def __init__(self, youtube_ie, downloader=None): - InfoExtractor.__init__(self, downloader) - self._youtube_ie = youtube_ie - - def report_download_page(self, playlist_id, pagenum): - """Report attempt to download playlist page with 
given number.""" - self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum)) - - def _real_initialize(self): - self._youtube_ie.initialize() - - def _real_extract(self, url): - # Extract playlist id - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid url: %s' % url) - return - - # Single video case - if mobj.group(3) is not None: - self._youtube_ie.extract(mobj.group(3)) - return - - # Download playlist pages - # prefix is 'p' as default for playlists but there are other types that need extra care - playlist_prefix = mobj.group(1) - if playlist_prefix == 'a': - playlist_access = 'artist' - else: - playlist_prefix = 'p' - playlist_access = 'view_play_list' - playlist_id = mobj.group(2) - video_ids = [] - pagenum = 1 - - while True: - self.report_download_page(playlist_id, pagenum) - url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum) - request = urllib2.Request(url) - try: - page = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) - return - - # Extract video identifiers - ids_in_page = [] - for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page): - if mobj.group(1) not in ids_in_page: - ids_in_page.append(mobj.group(1)) - video_ids.extend(ids_in_page) - - if re.search(self._MORE_PAGES_INDICATOR, page) is None: - break - pagenum = pagenum + 1 - - playliststart = self._downloader.params.get('playliststart', 1) - 1 - playlistend = self._downloader.params.get('playlistend', -1) - if playlistend == -1: - video_ids = video_ids[playliststart:] - else: - video_ids = video_ids[playliststart:playlistend] - - for id in video_ids: - self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id) - return - - -class YoutubeUserIE(InfoExtractor): - """Information Extractor for YouTube users.""" - - _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)' - _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s' - _GDATA_PAGE_SIZE = 50 - _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d' - _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]' - _youtube_ie = None - IE_NAME = u'youtube:user' - - def __init__(self, youtube_ie, downloader=None): - InfoExtractor.__init__(self, downloader) - self._youtube_ie = youtube_ie - - def report_download_page(self, username, start_index): - """Report attempt to download user page.""" - self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' % - (username, start_index, start_index + self._GDATA_PAGE_SIZE)) - - def _real_initialize(self): - self._youtube_ie.initialize() - - def _real_extract(self, url): - # Extract username - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid url: %s' % url) - return - - username = mobj.group(1) - - # Download video ids using YouTube Data API. Result size per - # query is limited (currently to 50 videos) so we need to query - # page by page until there are no video ids - it means we got - # all of them. 
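The GData paging described in the comment above uses 1-based start indices spaced _GDATA_PAGE_SIZE apart; a small sketch of the arithmetic (names are illustrative):

    PAGE_SIZE = 50  # mirrors _GDATA_PAGE_SIZE

    def start_index(pagenum):
        # GData 'start-index' is 1-based: page 0 -> 1, page 1 -> 51, page 2 -> 101
        return pagenum * PAGE_SIZE + 1

    print([start_index(p) for p in range(3)])  # [1, 51, 101]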
- - video_ids = [] - pagenum = 0 - - while True: - start_index = pagenum * self._GDATA_PAGE_SIZE + 1 - self.report_download_page(username, start_index) - - request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)) - - try: - page = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) - return - - # Extract video identifiers - ids_in_page = [] - - for mobj in re.finditer(self._VIDEO_INDICATOR, page): - if mobj.group(1) not in ids_in_page: - ids_in_page.append(mobj.group(1)) - - video_ids.extend(ids_in_page) - - # A little optimization - if current page is not - # "full", ie. does not contain PAGE_SIZE video ids then - # we can assume that this page is the last one - there - # are no more ids on further pages - no need to query - # again. - - if len(ids_in_page) < self._GDATA_PAGE_SIZE: - break - - pagenum += 1 - - all_ids_count = len(video_ids) - playliststart = self._downloader.params.get('playliststart', 1) - 1 - playlistend = self._downloader.params.get('playlistend', -1) - - if playlistend == -1: - video_ids = video_ids[playliststart:] - else: - video_ids = video_ids[playliststart:playlistend] - - self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" % - (username, all_ids_count, len(video_ids))) - - for video_id in video_ids: - self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id) - - -class DepositFilesIE(InfoExtractor): - """Information extractor for depositfiles.com""" - - _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)' - IE_NAME = u'DepositFiles' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, file_id): - """Report webpage download.""" - self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id) - - def report_extraction(self, file_id): - """Report information extraction.""" - self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id) - - def _real_extract(self, url): - # At this point we have a new file - self._downloader.increment_downloads() - - file_id = url.split('/')[-1] - # Rebuild url in english locale - url = 'http://depositfiles.com/en/files/' + file_id - - # Retrieve file webpage with 'Free download' button pressed - free_download_indication = { 'gateway_result' : '1' } - request = urllib2.Request(url, urllib.urlencode(free_download_indication)) - try: - self.report_download_webpage(file_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err)) - return - - # Search for the real file URL - mobj = re.search(r'(Attention.*?)', webpage, re.DOTALL) - if (mobj is not None) and (mobj.group(1) is not None): - restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip() - self._downloader.trouble(u'ERROR: %s' % restriction_message) - else: - self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url) - return - - file_url = mobj.group(1) - file_extension = os.path.splitext(file_url)[1][1:] - - # Search for file title - mobj = re.search(r'', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - file_title = mobj.group(1).decode('utf-8') - - try: - # Process file information - 
self._downloader.process_info({ - 'id': file_id.decode('utf-8'), - 'url': file_url.decode('utf-8'), - 'uploader': u'NA', - 'upload_date': u'NA', - 'title': file_title, - 'stitle': file_title, - 'ext': file_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError, err: - self._downloader.trouble(u'ERROR: unable to download file') - - -class FacebookIE(InfoExtractor): - """Information Extractor for Facebook""" - - _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P\d+)(?:.*)' - _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&' - _NETRC_MACHINE = 'facebook' - _available_formats = ['video', 'highqual', 'lowqual'] - _video_extensions = { - 'video': 'mp4', - 'highqual': 'mp4', - 'lowqual': 'mp4', - } - IE_NAME = u'facebook' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def _reporter(self, message): - """Add header and report message.""" - self._downloader.to_screen(u'[facebook] %s' % message) - - def report_login(self): - """Report attempt to log in.""" - self._reporter(u'Logging in') - - def report_video_webpage_download(self, video_id): - """Report attempt to download video webpage.""" - self._reporter(u'%s: Downloading video webpage' % video_id) - - def report_information_extraction(self, video_id): - """Report attempt to extract video information.""" - self._reporter(u'%s: Extracting video information' % video_id) - - def _parse_page(self, video_webpage): - """Extract video information from page""" - # General data - data = {'title': r'\("video_title", "(.*?)"\)', - 'description': r'
<div class="datawrap">(.*?)</div>
', - 'owner': r'\("video_owner_name", "(.*?)"\)', - 'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)', - } - video_info = {} - for piece in data.keys(): - mobj = re.search(data[piece], video_webpage) - if mobj is not None: - video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape")) - - # Video urls - video_urls = {} - for fmt in self._available_formats: - mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage) - if mobj is not None: - # URL is in a Javascript segment inside an escaped Unicode format within - # the generally utf-8 page - video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape")) - video_info['video_urls'] = video_urls - - return video_info - - def _real_initialize(self): - if self._downloader is None: - return - - useremail = None - password = None - downloader_params = self._downloader.params - - # Attempt to use provided username and password or .netrc data - if downloader_params.get('username', None) is not None: - useremail = downloader_params['username'] - password = downloader_params['password'] - elif downloader_params.get('usenetrc', False): - try: - info = netrc.netrc().authenticators(self._NETRC_MACHINE) - if info is not None: - useremail = info[0] - password = info[2] - else: - raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) - except (IOError, netrc.NetrcParseError), err: - self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err)) - return - - if useremail is None: - return - - # Log in - login_form = { - 'email': useremail, - 'pass': password, - 'login': 'Log+In' - } - request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form)) - try: - self.report_login() - login_results = urllib2.urlopen(request).read() - if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: - self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min).
Check credentials or wait.') - return - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err)) - return - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - video_id = mobj.group('ID') - - # Get video webpage - self.report_video_webpage_download(video_id) - request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id) - try: - page = urllib2.urlopen(request) - video_webpage = page.read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - # Start extracting information - self.report_information_extraction(video_id) - - # Extract information - video_info = self._parse_page(video_webpage) - - # uploader - if 'owner' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') - return - video_uploader = video_info['owner'] - - # title - if 'title' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract video title') - return - video_title = video_info['title'] - video_title = video_title.decode('utf-8') - video_title = sanitize_title(video_title) - - simple_title = _simplify_title(video_title) - - # thumbnail image - if 'thumbnail' not in video_info: - self._downloader.trouble(u'WARNING: unable to extract video thumbnail') - video_thumbnail = '' - else: - video_thumbnail = video_info['thumbnail'] - - # upload date - upload_date = u'NA' - if 'upload_date' in video_info: - upload_time = video_info['upload_date'] - timetuple = email.utils.parsedate_tz(upload_time) - if timetuple is not None: - try: - upload_date = time.strftime('%Y%m%d', timetuple[0:9]) - except: - pass - - # description - video_description = video_info.get('description', 'No description available.') - - url_map = video_info['video_urls'] - if len(url_map.keys()) > 0: - # Decide which formats to download - req_format = self._downloader.params.get('format', None) - format_limit = self._downloader.params.get('format_limit', None) - - if format_limit is not None and format_limit in self._available_formats: - format_list = self._available_formats[self._available_formats.index(format_limit):] - else: - format_list = self._available_formats - existing_formats = [x for x in format_list if x in url_map] - if len(existing_formats) == 0: - self._downloader.trouble(u'ERROR: no known formats available for video') - return - if req_format is None: - video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality - elif req_format == 'worst': - video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality - elif req_format == '-1': - video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats - else: - # Specific format - if req_format not in url_map: - self._downloader.trouble(u'ERROR: requested format not available') - return - video_url_list = [(req_format, url_map[req_format])] # Specific format - - for format_param, video_real_url in video_url_list: - - # At this point we have a new video - self._downloader.increment_downloads() - - # Extension - video_extension = self._video_extensions.get(format_param, 'mp4') - - try: - # Process video information - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': 
video_real_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': upload_date, - 'title': video_title, - 'stitle': simple_title, - 'ext': video_extension.decode('utf-8'), - 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), - 'thumbnail': video_thumbnail.decode('utf-8'), - 'description': video_description.decode('utf-8'), - 'player_url': None, - }) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download video') - -class BlipTVIE(InfoExtractor): - """Information extractor for blip.tv""" - - _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$' - _URL_EXT = r'^.*\.([a-z0-9]+)$' - IE_NAME = u'blip.tv' - - def report_extraction(self, file_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) - - def report_direct_download(self, title): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title)) - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - if '?' in url: - cchar = '&' - else: - cchar = '?' - json_url = url + cchar + 'skin=json&version=2&no_wrap=1' - request = urllib2.Request(json_url) - self.report_extraction(mobj.group(1)) - info = None - try: - urlh = urllib2.urlopen(request) - if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download - basename = url.split('/')[-1] - title,ext = os.path.splitext(basename) - title = title.decode('UTF-8') - ext = ext.replace('.', '') - self.report_direct_download(title) - info = { - 'id': title, - 'url': url, - 'title': title, - 'stitle': _simplify_title(title), - 'ext': ext, - 'urlhandle': urlh - } - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err)) - return - if info is None: # Regular URL - try: - json_code = urlh.read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err)) - return - - try: - json_data = json.loads(json_code) - if 'Post' in json_data: - data = json_data['Post'] - else: - data = json_data - - upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') - video_url = data['media']['url'] - umobj = re.match(self._URL_EXT, video_url) - if umobj is None: - raise ValueError('Can not determine filename extension') - ext = umobj.group(1) - - info = { - 'id': data['item_id'], - 'url': video_url, - 'uploader': data['display_name'], - 'upload_date': upload_date, - 'title': data['title'], - 'stitle': _simplify_title(data['title']), - 'ext': ext, - 'format': data['media']['mimeType'], - 'thumbnail': data['thumbnailUrl'], - 'description': data['description'], - 'player_url': data['embedUrl'] - } - except (ValueError,KeyError), err: - self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) - return - - self._downloader.increment_downloads() - - try: - self._downloader.process_info(info) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class MyVideoIE(InfoExtractor): - """Information Extractor for myvideo.de.""" - - _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*' - IE_NAME = u'myvideo' - - def __init__(self, 
downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id) - - def _real_extract(self,url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - video_id = mobj.group(1) - - # Get video webpage - request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - self.report_extraction(video_id) - mobj = re.search(r'', - webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - video_url = mobj.group(1) + ('/%s.flv' % video_id) - - mobj = re.search('<title>([^<]+)</title>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - - video_title = mobj.group(1) - video_title = sanitize_title(video_title) - - simple_title = _simplify_title(video_title) - - try: - self._downloader.process_info({ - 'id': video_id, - 'url': video_url, - 'uploader': u'NA', - 'upload_date': u'NA', - 'title': video_title, - 'stitle': simple_title, - 'ext': u'flv', - 'format': u'NA', - 'player_url': None, - }) - except UnavailableVideoError: - self._downloader.trouble(u'\nERROR: Unable to download video') - -class ComedyCentralIE(InfoExtractor): - """Information extractor for The Daily Show and Colbert Report """ - - _VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$' - IE_NAME = u'comedycentral' - - def report_extraction(self, episode_id): - self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id) - - def report_config_download(self, episode_id): - self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id) - - def report_index_download(self, episode_id): - self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id) - - def report_player_url(self, episode_id): - self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id) - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - if mobj.group('shortname'): - if mobj.group('shortname') in ('tds', 'thedailyshow'): - url = u'http://www.thedailyshow.com/full-episodes/' - else: - url = u'http://www.colbertnation.com/full-episodes/' - mobj = re.match(self._VALID_URL, url) - assert mobj is not None - - dlNewest = not mobj.group('episode') - if dlNewest: - epTitle = mobj.group('showname') - else: - epTitle = mobj.group('episode') - - req = urllib2.Request(url) - self.report_extraction(epTitle) - try: - htmlHandle = urllib2.urlopen(req) - html = htmlHandle.read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err)) - return - if dlNewest: - url = htmlHandle.geturl() - mobj = re.match(self._VALID_URL, url) - if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url) - return - if mobj.group('episode') == '': - self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url) - return - epTitle = mobj.group('episode') - - mMovieParams = re.findall('(?:[^/]+)/(?P[^/?]+)[/?]?.*$' - IE_NAME = u'escapist' - - def report_extraction(self, showName): - self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName) - - def report_config_download(self, showName): - self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName) - - def _real_extract(self, url): - htmlParser = HTMLParser.HTMLParser() - - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - showName = mobj.group('showname') - videoId = mobj.group('episode') - - self.report_extraction(showName) - try: - webPage = urllib2.urlopen(url).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err)) - return - - descMatch = re.search('[0-9]+)/(?P.*)$' - IE_NAME = u'collegehumor' - - def report_webpage(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) - - def _real_extract(self, url): - htmlParser = HTMLParser.HTMLParser() - - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - video_id = mobj.group('videoid') - - self.report_webpage(video_id) - request = urllib2.Request(url) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - m = re.search(r'id="video:(?P[0-9]+)"', webpage) - if m is None: - self._downloader.trouble(u'ERROR: Cannot extract internal video ID') - return - internal_video_id = m.group('internalvideoid') - - info = { - 'id': video_id, - 'internal_id': internal_video_id, - } - - self.report_extraction(video_id) - xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id - try: - metaXml = urllib2.urlopen(xmlUrl).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err)) - return - - mdoc = xml.etree.ElementTree.fromstring(metaXml) - try: - videoNode = mdoc.findall('./video')[0] - info['description'] = videoNode.findall('./description')[0].text - info['title'] = videoNode.findall('./caption')[0].text - info['stitle'] = _simplify_title(info['title']) - info['url'] = videoNode.findall('./file')[0].text - info['thumbnail'] = videoNode.findall('./thumbnail')[0].text - info['ext'] = info['url'].rpartition('.')[2] - info['format'] = info['ext'] - except IndexError: - self._downloader.trouble(u'\nERROR: Invalid metadata XML file') - return - - self._downloader.increment_downloads() - - try: - self._downloader.process_info(info) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class XVideosIE(InfoExtractor): - """Information extractor for xvideos.com""" - - _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)' 
- IE_NAME = u'xvideos' - - def report_webpage(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) - - def _real_extract(self, url): - htmlParser = HTMLParser.HTMLParser() - - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - video_id = mobj.group(1).decode('utf-8') - - self.report_webpage(video_id) - - request = urllib2.Request(r'http://www.xvideos.com/video' + video_id) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - self.report_extraction(video_id) - - - # Extract video URL - mobj = re.search(r'flv_url=(.+?)&', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video url') - return - video_url = urllib2.unquote(mobj.group(1).decode('utf-8')) - - - # Extract title - mobj = re.search(r'(.*?)\s+-\s+XVID', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video title') - return - video_title = mobj.group(1).decode('utf-8') - - - # Extract video thumbnail - mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') - return - video_thumbnail = mobj.group(1).decode('utf-8') - - - - self._downloader.increment_downloads() - info = { - 'id': video_id, - 'url': video_url, - 'uploader': None, - 'upload_date': None, - 'title': video_title, - 'stitle': _simplify_title(video_title), - 'ext': 'flv', - 'format': 'flv', - 'thumbnail': video_thumbnail, - 'description': None, - 'player_url': None, - } - - try: - self._downloader.process_info(info) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download ' + video_id) - - -class SoundcloudIE(InfoExtractor): - """Information extractor for soundcloud.com - To access the media, the uid of the song and a stream token - must be extracted from the page source and the script must make - a request to media.soundcloud.com/crossdomain.xml. 
Then - the media can be grabbed by requesting from an url composed - of the stream token and uid - """ - - _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)' - IE_NAME = u'soundcloud' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_webpage(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) - - def _real_extract(self, url): - htmlParser = HTMLParser.HTMLParser() - - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - # extract uploader (which is in the url) - uploader = mobj.group(1).decode('utf-8') - # extract simple title (uploader + slug of song title) - slug_title = mobj.group(2).decode('utf-8') - simple_title = uploader + '-' + slug_title - - self.report_webpage('%s/%s' % (uploader, slug_title)) - - request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title)) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - self.report_extraction('%s/%s' % (uploader, slug_title)) - - # extract uid and stream token that soundcloud hands out for access - mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage) - if mobj: - video_id = mobj.group(1) - stream_token = mobj.group(2) - - # extract unsimplified title - mobj = re.search('"title":"(.*?)",', webpage) - if mobj: - title = mobj.group(1) - - # construct media url (with uid/token) - mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s" - mediaURL = mediaURL % (video_id, stream_token) - - # description - description = u'No description available' - mobj = re.search('track-description-value"><p>(.*?)</p>', webpage) - if mobj: - description = mobj.group(1) - - # upload date - upload_date = None - mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage) - if mobj: - try: - upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d') - except Exception, e: - print str(e) - - # for soundcloud, a request to a cross domain is required for cookies - request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers) - - try: - self._downloader.process_info({ - 'id': video_id.decode('utf-8'), - 'url': mediaURL, - 'uploader': uploader.decode('utf-8'), - 'upload_date': upload_date, - 'title': simple_title.decode('utf-8'), - 'stitle': simple_title.decode('utf-8'), - 'ext': u'mp3', - 'format': u'NA', - 'player_url': None, - 'description': description.decode('utf-8') - }) - except UnavailableVideoError: - self._downloader.trouble(u'\nERROR: unable to download video') - - -class InfoQIE(InfoExtractor): - """Information extractor for infoq.com""" - - _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$' - IE_NAME = u'infoq' - - def report_webpage(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % 
(self.IE_NAME, video_id)) - - def _real_extract(self, url): - htmlParser = HTMLParser.HTMLParser() - - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - self.report_webpage(url) - - request = urllib2.Request(url) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - self.report_extraction(url) - - - # Extract video URL - mobj = re.search(r"jsclassref='([^']*)'", webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video url') - return - video_url = 'rtmpe://video.infoq.com/cfx/st/' + urllib2.unquote(mobj.group(1).decode('base64')) - - - # Extract title - mobj = re.search(r'contentTitle = "(.*?)";', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video title') - return - video_title = mobj.group(1).decode('utf-8') - - # Extract description - video_description = u'No description available.' - mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage) - if mobj is not None: - video_description = mobj.group(1).decode('utf-8') - - video_filename = video_url.split('/')[-1] - video_id, extension = video_filename.split('.') - - self._downloader.increment_downloads() - info = { - 'id': video_id, - 'url': video_url, - 'uploader': None, - 'upload_date': None, - 'title': video_title, - 'stitle': _simplify_title(video_title), - 'ext': extension, - 'format': extension, # Extension is always(?) mp4, but seems to be flv - 'thumbnail': None, - 'description': video_description, - 'player_url': None, - } - - try: - self._downloader.process_info(info) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download ' + video_url) - -class MixcloudIE(InfoExtractor): - """Information extractor for www.mixcloud.com""" - _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)' - IE_NAME = u'mixcloud' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_json(self, file_id): - """Report JSON download.""" - self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME) - - def report_extraction(self, file_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) - - def get_urls(self, jsonData, fmt, bitrate='best'): - """Get urls from 'audio_formats' section in json""" - file_url = None - try: - bitrate_list = jsonData[fmt] - if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list: - bitrate = max(bitrate_list) # select highest - - url_list = jsonData[fmt][bitrate] - except TypeError: # we have no bitrate info. 
- url_list = jsonData[fmt] - - return url_list - - def check_urls(self, url_list): - """Returns 1st active url from list""" - for url in url_list: - try: - urllib2.urlopen(url) - return url - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - url = None - - return None - - def _print_formats(self, formats): - print 'Available formats:' - for fmt in formats.keys(): - for b in formats[fmt]: - try: - ext = formats[fmt][b][0] - print '%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]) - except TypeError: # we have no bitrate info - ext = formats[fmt][0] - print '%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]) - break - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - # extract uploader & filename from url - uploader = mobj.group(1).decode('utf-8') - file_id = uploader + "-" + mobj.group(2).decode('utf-8') - - # construct API request - file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json' - # retrieve .json file with links to files - request = urllib2.Request(file_url) - try: - self.report_download_json(file_url) - jsonData = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % str(err)) - return - - # parse JSON - json_data = json.loads(jsonData) - player_url = json_data['player_swf_url'] - formats = dict(json_data['audio_formats']) - - req_format = self._downloader.params.get('format', None) - bitrate = None - - if self._downloader.params.get('listformats', None): - self._print_formats(formats) - return - - if req_format is None or req_format == 'best': - for format_param in formats.keys(): - url_list = self.get_urls(formats, format_param) - # check urls - file_url = self.check_urls(url_list) - if file_url is not None: - break # got it! 
- else: - if req_format not in formats.keys(): - self._downloader.trouble(u'ERROR: format is not available') - return - - url_list = self.get_urls(formats, req_format) - file_url = self.check_urls(url_list) - format_param = req_format - - # We have audio - self._downloader.increment_downloads() - try: - # Process file information - self._downloader.process_info({ - 'id': file_id.decode('utf-8'), - 'url': file_url.decode('utf-8'), - 'uploader': uploader.decode('utf-8'), - 'upload_date': u'NA', - 'title': json_data['name'], - 'stitle': _simplify_title(json_data['name']), - 'ext': file_url.split('.')[-1].decode('utf-8'), - 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), - 'thumbnail': json_data['thumbnail_url'], - 'description': json_data['description'], - 'player_url': player_url.decode('utf-8'), - }) - except UnavailableVideoError, err: - self._downloader.trouble(u'ERROR: unable to download file') - -class StanfordOpenClassroomIE(InfoExtractor): - """Information extractor for Stanford's Open ClassRoom""" - - _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' - IE_NAME = u'stanfordoc' - - def report_download_webpage(self, objid): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid)) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - - if mobj.group('course') and mobj.group('video'): # A specific video - course = mobj.group('course') - video = mobj.group('video') - info = { - 'id': _simplify_title(course + '_' + video), - } - - self.report_extraction(info['id']) - baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' - xmlUrl = baseUrl + video + '.xml' - try: - metaXml = urllib2.urlopen(xmlUrl).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % unicode(err)) - return - mdoc = xml.etree.ElementTree.fromstring(metaXml) - try: - info['title'] = mdoc.findall('./title')[0].text - info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text - except IndexError: - self._downloader.trouble(u'\nERROR: Invalid metadata XML file') - return - info['stitle'] = _simplify_title(info['title']) - info['ext'] = info['url'].rpartition('.')[2] - info['format'] = info['ext'] - self._downloader.increment_downloads() - try: - self._downloader.process_info(info) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download video') - elif mobj.group('course'): # A course page - unescapeHTML = HTMLParser.HTMLParser().unescape - - course = mobj.group('course') - info = { - 'id': _simplify_title(course), - 'type': 'playlist', - } - - self.report_download_webpage(info['id']) - try: - coursepage = urllib2.urlopen(url).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err)) - return - - m = re.search('<h1>([^<]+)</h1>', coursepage) - if m: - info['title'] = unescapeHTML(m.group(1)) - else: - info['title'] = info['id'] - info['stitle'] 
= _simplify_title(info['title']) - - m = re.search('<description>([^<]+)</description>', coursepage) - if m: - info['description'] = unescapeHTML(m.group(1)) - - links = _orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage)) - info['list'] = [ - { - 'type': 'reference', - 'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage), - } - for vpage in links] - - for entry in info['list']: - assert entry['type'] == 'reference' - self.extract(entry['url']) - else: # Root page - unescapeHTML = HTMLParser.HTMLParser().unescape - - info = { - 'id': 'Stanford OpenClassroom', - 'type': 'playlist', - } - - self.report_download_webpage(info['id']) - rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' - try: - rootpage = urllib2.urlopen(rootURL).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err)) - return - - info['title'] = info['id'] - info['stitle'] = _simplify_title(info['title']) - - links = _orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage)) - info['list'] = [ - { - 'type': 'reference', - 'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage), - } - for cpage in links] - - for entry in info['list']: - assert entry['type'] == 'reference' - self.extract(entry['url']) - -class MTVIE(InfoExtractor): - """Information extractor for MTV.com""" - - _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$' - IE_NAME = u'mtv' - - def report_webpage(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - if not mobj.group('proto'): - url = 'http://' + url - video_id = mobj.group('videoid') - self.report_webpage(video_id) - - request = urllib2.Request(url) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return - - mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract song name') - return - song_name = _unescapeHTML(mobj.group(1).decode('iso-8859-1')) - mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract performer') - return - performer = _unescapeHTML(mobj.group(1).decode('iso-8859-1')) - video_title = performer + ' - ' + song_name - - mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to mtvn_uri') - return - mtvn_uri = mobj.group(1) - - mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract content id') - return - content_id = mobj.group(1) - - videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri - 
self.report_extraction(video_id) - request = urllib2.Request(videogen_url) - try: - metadataXml = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % str(err)) - return - - mdoc = xml.etree.ElementTree.fromstring(metadataXml) - renditions = mdoc.findall('.//rendition') - - # For now, always pick the highest quality. - rendition = renditions[-1] - - try: - _,_,ext = rendition.attrib['type'].partition('/') - format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate'] - video_url = rendition.find('./src').text - except KeyError: - self._downloader.trouble('Invalid rendition field.') - return - - self._downloader.increment_downloads() - info = { - 'id': video_id, - 'url': video_url, - 'uploader': performer, - 'title': video_title, - 'stitle': _simplify_title(video_title), - 'ext': ext, - 'format': format, - } - - try: - self._downloader.process_info(info) - except UnavailableVideoError, err: - self._downloader.trouble(u'\nERROR: unable to download ' + video_id) - - -class PostProcessor(object): - """Post Processor class. - - PostProcessor objects can be added to downloaders with their - add_post_processor() method. When the downloader has finished a - successful download, it will take its internal chain of PostProcessors - and start calling the run() method on each one of them, first with - an initial argument and then with the returned value of the previous - PostProcessor. - - The chain will be stopped if one of them ever returns None or the end - of the chain is reached. - - PostProcessor objects follow a "mutual registration" process similar - to InfoExtractor objects. - """ - - _downloader = None - - def __init__(self, downloader=None): - self._downloader = downloader - - def set_downloader(self, downloader): - """Sets the downloader for this PP.""" - self._downloader = downloader - - def run(self, information): - """Run the PostProcessor. - - The "information" argument is a dictionary like the ones - composed by InfoExtractors. The only difference is that this - one has an extra field called "filepath" that points to the - downloaded file. - - When this method returns None, the postprocessing chain is - stopped. However, this method may return an information - dictionary that will be passed to the next postprocessing - object in the chain. It can be the one it received after - changing some fields. - - In addition, this method may raise a PostProcessingError - exception that will be taken into account by the downloader - it was called from. 
- """ - return information # by default, do nothing - -class AudioConversionError(BaseException): - def __init__(self, message): - self.message = message - -class FFmpegExtractAudioPP(PostProcessor): - - def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False): - PostProcessor.__init__(self, downloader) - if preferredcodec is None: - preferredcodec = 'best' - self._preferredcodec = preferredcodec - self._preferredquality = preferredquality - self._keepvideo = keepvideo - - @staticmethod - def get_audio_codec(path): - try: - cmd = ['ffprobe', '-show_streams', '--', _encodeFilename(path)] - handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE) - output = handle.communicate()[0] - if handle.wait() != 0: - return None - except (IOError, OSError): - return None - audio_codec = None - for line in output.split('\n'): - if line.startswith('codec_name='): - audio_codec = line.split('=')[1].strip() - elif line.strip() == 'codec_type=audio' and audio_codec is not None: - return audio_codec - return None - - @staticmethod - def run_ffmpeg(path, out_path, codec, more_opts): - if codec is None: - acodec_opts = [] - else: - acodec_opts = ['-acodec', codec] - cmd = ['ffmpeg', '-y', '-i', _encodeFilename(path), '-vn'] + acodec_opts + more_opts + ['--', _encodeFilename(out_path)] - try: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout,stderr = p.communicate() - except (IOError, OSError): - e = sys.exc_info()[1] - if isinstance(e, OSError) and e.errno == 2: - raise AudioConversionError('ffmpeg not found. Please install ffmpeg.') - else: - raise e - if p.returncode != 0: - msg = stderr.strip().split('\n')[-1] - raise AudioConversionError(msg) - - def run(self, information): - path = information['filepath'] - - filecodec = self.get_audio_codec(path) - if filecodec is None: - self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe') - return None - - more_opts = [] - if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'): - if self._preferredcodec == 'm4a' and filecodec == 'aac': - # Lossless, but in another container - acodec = 'copy' - extension = self._preferredcodec - more_opts = ['-absf', 'aac_adtstoasc'] - elif filecodec in ['aac', 'mp3', 'vorbis']: - # Lossless if possible - acodec = 'copy' - extension = filecodec - if filecodec == 'aac': - more_opts = ['-f', 'adts'] - if filecodec == 'vorbis': - extension = 'ogg' - else: - # MP3 otherwise. 
- acodec = 'libmp3lame' - extension = 'mp3' - more_opts = [] - if self._preferredquality is not None: - more_opts += ['-ab', self._preferredquality] - else: - # We convert the audio (lossy) - acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec] - extension = self._preferredcodec - more_opts = [] - if self._preferredquality is not None: - more_opts += ['-ab', self._preferredquality] - if self._preferredcodec == 'aac': - more_opts += ['-f', 'adts'] - if self._preferredcodec == 'm4a': - more_opts += ['-absf', 'aac_adtstoasc'] - if self._preferredcodec == 'vorbis': - extension = 'ogg' - if self._preferredcodec == 'wav': - extension = 'wav' - more_opts += ['-f', 'wav'] - - prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups - new_path = prefix + sep + extension - self._downloader.to_screen(u'[ffmpeg] Destination: ' + new_path) - try: - self.run_ffmpeg(path, new_path, acodec, more_opts) - except: - etype,e,tb = sys.exc_info() - if isinstance(e, AudioConversionError): - self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message) - else: - self._downloader.to_stderr(u'ERROR: error running ffmpeg') - return None - - # Try to update the date time for extracted audio file. - if information.get('filetime') is not None: - try: - os.utime(_encodeFilename(new_path), (time.time(), information['filetime'])) - except: - self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file') - - if not self._keepvideo: - try: - os.remove(_encodeFilename(path)) - except (IOError, OSError): - self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file') - return None - - information['filepath'] = new_path - return information +from Utils import * +from FileDownloader import * +from InfoExtractors import * +from PostProcessing import * def updateSelf(downloader, filename): ''' Update the program file with the latest version from the repository ''' @@ -4324,7 +51,7 @@ def updateSelf(downloader, filename): try: try: - urlh = urllib.urlopen(UPDATE_URL) + urlh = urllib2.urlopen(UPDATE_URL) newcontent = urlh.read() vmatch = re.search("__version__ = '([^']+)'", newcontent) @@ -4781,8 +508,3 @@ def main(): sys.exit(u'ERROR: fixed output name but more than one file to download') except KeyboardInterrupt: sys.exit(u'\nERROR: Interrupted by user') - -if __name__ == '__main__': - main() - -# vim: set ts=4 sw=4 sts=4 noet ai si filetype=python: diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py new file mode 100755 index 000000000..6f20402e2 --- /dev/null +++ b/youtube_dl/__main__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import __init__ + +if __name__ == '__main__': + __init__.main()
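The `import __init__` in the new `__main__.py` looks odd, but it follows from how the executable is assembled: the package's modules are packed flat at the root of a zip archive whose first line is a shebang, and Python executes such an archive by running its top-level `__main__.py`, with the archive root on the import path. A minimal sketch of the idea follows; the `build_executable_zip` helper and its file names are illustrative, not part of this patch:

import os
import stat
import zipfile

def build_executable_zip(srcdir, outfile):
    with open(outfile, 'wb') as out:
        # Write the shebang first; zip readers locate the archive from
        # the end of the file, so leading prefix data is ignored.
        out.write(b'#!/usr/bin/env python\n')
        zf = zipfile.ZipFile(out, 'w')
        for name in sorted(os.listdir(srcdir)):
            if name.endswith('.py'):
                # Flatten paths so the modules sit at the archive root,
                # which is what makes `import __init__` resolve.
                zf.write(os.path.join(srcdir, name), arcname=name)
        zf.close()
    # Mark the result executable.
    os.chmod(outfile, os.stat(outfile).st_mode | stat.S_IEXEC)

if __name__ == '__main__':
    build_executable_zip('youtube_dl', 'youtube-dl')

Running `./youtube-dl` then starts the interpreter on the zip, which executes `__main__.py` and, through it, the real entry point `main()` in `__init__.py`.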