glmnet/0000755000176200001440000000000014471023421011536 5ustar liggesusersglmnet/NAMESPACE0000644000176200001440000000523214410376106012763 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method("[",stratifySurv) S3method(buildPredmat,array) S3method(buildPredmat,coxnetlist) S3method(buildPredmat,default) S3method(buildPredmat,glmnetfitlist) S3method(buildPredmat,lognetlist) S3method(buildPredmat,mrelnetlist) S3method(buildPredmat,multnetlist) S3method(coef,cv.glmnet) S3method(coef,cv.relaxed) S3method(coef,glmnet) S3method(coef,relaxed) S3method(deviance,glmnet) S3method(family,cv.glmnet) S3method(family,glmnet) S3method(family,glmnetfit) S3method(family,relaxed) S3method(plot,cv.glmnet) S3method(plot,cv.relaxed) S3method(plot,glmnet) S3method(plot,mrelnet) S3method(plot,multnet) S3method(plot,relaxed) S3method(predict,coxnet) S3method(predict,cv.glmnet) S3method(predict,cv.relaxed) S3method(predict,elnet) S3method(predict,fishnet) S3method(predict,glmnet) S3method(predict,glmnetfit) S3method(predict,lognet) S3method(predict,mrelnet) S3method(predict,multnet) S3method(predict,relaxed) S3method(print,bigGlm) S3method(print,confusion.table) S3method(print,cv.glmnet) S3method(print,cv.relaxed) S3method(print,glmnet) S3method(print,relaxed) S3method(survfit,coxnet) S3method(survfit,cv.glmnet) export(Cindex) export(assess.glmnet) export(bigGlm) export(buildPredmat) export(coef.glmnet) export(coef.relaxed) export(confusion.glmnet) export(coxgrad) export(coxnet.deviance) export(cv.glmnet) export(glmnet) export(glmnet.control) export(glmnet.measures) export(makeX) export(na.replace) export(na_sparse_fix) export(predict.glmnet) export(predict.relaxed) export(prepareX) export(print.cv.glmnet) export(relax.glmnet) export(rmult) export(roc.glmnet) export(stratifySurv) import(Matrix) import(foreach) import(methods) importFrom(Matrix,sparse.model.matrix) importFrom(Rcpp,sourceCpp) importFrom(grDevices,rainbow) importFrom(graphics,abline) 
importFrom(graphics,axis) importFrom(graphics,matplot) importFrom(graphics,par) importFrom(graphics,plot) importFrom(graphics,points) importFrom(graphics,segments) importFrom(graphics,text) importFrom(shape,colorlegend) importFrom(stats,approx) importFrom(stats,as.formula) importFrom(stats,binomial) importFrom(stats,coef) importFrom(stats,contrasts) importFrom(stats,family) importFrom(stats,gaussian) importFrom(stats,glm) importFrom(stats,glm.fit) importFrom(stats,median) importFrom(stats,model.matrix) importFrom(stats,na.pass) importFrom(stats,predict) importFrom(stats,rmultinom) importFrom(stats,rnorm) importFrom(stats,runif) importFrom(stats,weighted.mean) importFrom(survival,Surv) importFrom(survival,concordance) importFrom(survival,coxph) importFrom(survival,is.Surv) importFrom(survival,strata) importFrom(survival,survfit) importFrom(utils,packageDescription) useDynLib(glmnet) glmnet/README.md0000644000176200001440000000745314405701475013037 0ustar liggesusers # Lasso and Elastic-Net Regularized Generalized Linear Models [![CRAN\_Status\_Badge](https://www.r-pkg.org/badges/version/glmnet)](https://cran.r-project.org/package=glmnet)[![](https://cranlogs.r-pkg.org/badges/glmnet)](https://CRAN.R-project.org/package=glmnet) We provide extremely efficient procedures for fitting the entire lasso or elastic-net regularization path for linear regression (gaussian), multi-task gaussian, logistic and multinomial regression models (grouped or not), Poisson regression and the Cox model. The algorithm uses cyclical coordinate descent in a path-wise fashion. Details may be found in Friedman, Hastie, and Tibshirani ([2010](#ref-glmnet)), Simon et al. ([2011](#ref-coxnet)), Tibshirani et al. ([2012](#ref-strongrules)), Simon, Friedman, and Hastie ([2013](#ref-block)). Version 3.0 is a major release with several new features, including: - Relaxed fitting to allow models in the path to be refit without regularization. 
CV will select from these, or from specified mixtures of the relaxed fit and the regular fit; - Progress bar to monitor computation; - Assessment functions for displaying performance of models on test data. These include all the measures available via `cv.glmnet`, as well as confusion matrices and ROC plots for classification models; - print methods for CV output; - Functions for building the `x` input matrix for `glmnet` that allow for *one-hot-encoding* of factor variables, appropriate treatment of missing values, and an option to create a sparse matrix if appropriate. - A function for fitting unpenalized a single version of any of the GLMs of `glmnet`. Version 4.0 is a major release that allows for any GLM family, besides the built-in families. Version 4.1 is a major release that expands the scope for survival modeling, allowing for (start, stop) data, strata, and sparse X inputs. It also provides a much-requested method for `survival:survfit`. ## References
Friedman, Jerome, Trevor Hastie, and Robert Tibshirani. 2010. “Regularization Paths for Generalized Linear Models via Coordinate Descent.” *Journal of Statistical Software, Articles* 33 (1): 1–22. .
Simon, Noah, Jerome Friedman, and Trevor Hastie. 2013. “A Blockwise Descent Algorithm for Group-Penalized Multiresponse and Multinomial Regression.”
Simon, Noah, Jerome Friedman, Trevor Hastie, and Robert Tibshirani. 2011. “Regularization Paths for Cox’s Proportional Hazards Model via Coordinate Descent.” *Journal of Statistical Software, Articles* 39 (5): 1–13. .
Tibshirani, Robert, Jacob Bien, Jerome Friedman, Trevor Hastie, Noah Simon, Jonathan Taylor, and Ryan Tibshirani. 2012. “Strong Rules for Discarding Predictors in Lasso-Type Problems.” *Journal of the Royal Statistical Society: Series B (Statistical Methodology)* 74 (2): 245–66. .
Kenneth Tay, J, Narasimhan, Balasubramanian, Hastie, Trevor. 2023. “Elastic Net Regularization Paths for All Generalized Linear Models.” *Journal of Statistical Software, Articles* 106 (1): 1–31. .
glmnet/data/0000755000176200001440000000000014046050560012451 5ustar liggesusersglmnet/data/QuickStartExample.rda0000644000176200001440000004070514046050560016555 0ustar liggesusersBZh91AY&SYd *_wm}3m0u\zwWww5c[۽*{ؽ{ܽޮm{yz{WJۻou̴jn_duQPA @)IOb&5<= m SL ?L2bdT$،L@&Ѣ`OcFL U?h Si1dɦ4ODi3BdL?Ԟ#̦ibm24&M1 M2m"Q hhO@ҞSxO zi)S#d&O&jm4z &212i&xLh)L&FI`#jm(uS2 #dd`ʧi=2eO4zi0LIdڄ#ɣL cH&SѦ d%<ɦ)PF 2h 2i"z`14O&L2aM`Ѫ~S44i<#m)4I#&&M10*O! Hɦdi=4hhL4@50@2jlh4Șh= 21Ipf 웎"s&]poV? H<$r x 0$-w\yNZY=s{ޒ C] MI=UmP <C &Ϩ]I9/[ݑVuWWI=|? u!%=`5LƾL0Vu㯇2ClI o_zU{ʆ;I֪P?(ON6:gYDf&H& ͐L~ .MQzI"linﶯ°^Bzgg:~;ͯzGEGBOt5|Pרjxs)(ܗ|TWЏ)ݩ*q4'l T?L)?7L{rtvsCՐ?}'wXtmvQѮJeҲE#-=m 3zsKIU6OUN^`Ri&i[l K%Р+CL ک|-Հs{V0h.YjO_ b(.MRyf*E9 d+ _,I-@jY}Fvr_!Zj\4OJe3I )@~&hl#XQe^n*Xfhc+D@=3o76&v5us2o1@,+/(2lܩr|#_Prs]a0[.0? Jqx9P#\uO:D^@Hl̵Em<X< R0h6w;/S̎Y^&N>\š Qyotwܢ)8=DPSB; ״U +~(}CL(GWua2/͕gI̪CUq綫E#4bf5sşY퍀Z֙B܈HӯL\@]&Y`Wz|%q2oJ2B]:VO)E:)/@٘i/Fd^Svb3fx.XBQe_g5WɉT m3K;i/0ՐjْL}j>v}엱G$ʜ }ҽ!ƅAr.4!Vq]ϔ5XJ@ƞy5ur\Cڵ4x/}g5ցxB֬ GC6_dlcLwVdǾ,m&,;)L&[+'#b OPD8so&^cP]8v.rF7G~!rI!Ao[ޱD{a;?zSs5ktSIK,5jg=% wB{"Va3Y:hP=F)GY_s{=DWT\[ĆvUHV3\  oW~"u|0% (#n^UW ֵkC}u;pYn&KOҎj4#;#Wu!8Y>NqbzB̎BrxCh5+koy+EK}{«QqDj4l3'ΫM;@Jq?W&Vc?H[ؽ1 SsOT6M7FSjNϝlYlUOD*ϼ $Hl (15 ჲwO1O2ڱX<?eѪ]ŧ̅=< ́횽Nc"6'm NtP4B ?qI(< ω_譌Z(ZĆSkL̨vݛ{ݰ.v-~F%K5JxlYz#⦛>O?ͥLqhz —>׫7_9UohѕreYeaGm}_(9!YԙDex2֛@~sο=弱!vz*|ԮVtԸxbu5;q}_VvGB2Y/rO J{:@;ڈ{cٙ*#En&s3W`@βo):0as˥֭}ݑSX`VI/SӪ]3RdkV"(03mSQVG1Wm7 ^$|[&IŴ̑ ~d|?+7Zv3]0LB^[IyhcJQ*)ۘ)ё'bT޼,~2)uЦٰH%Z}Cs-.jb;n0`"h{ϱ  `])6S! hg/sn[bh@@Rxýy|:Q2!jfgzSN ˝:dKY^Y#W' Q%VG>?'L:Γ' j3*=9&Xwvވ}`]ˬ4aB'd(ܵlov8IKnwoul&5Nu6& V~܊zNRZoכRdDXvkQ*:m!m{f<+-A<ȨAucV CrAE '᱇i9 7FU)m%vc&tY, Z9@_.a ުmk1dhWdRF~!{%] BZ֛MnsyNM?]A?Qj~9͛j< ɷe1u u!Z'ױIeIBKQ(u'U[S$9Q!KPrx}fէ-olMd)C4YR[:XklÆaO3sAd8h&##3;e[[l \uSናzAYYrpZ3f! 
歟6:YaGk~LAR )AGґ ;R6E`|UoP6L2)oA|c<= Eo姍Mob xe;CdJ`$nE€k:qOny2Lp fk6gե9Բ]߁ Ӿz@AT?SϘp)709(C0*ngS@KVIqkCUf uQݞKꐂRX~ZWK9ȀIp( vi!j (4T-'}^m'S7tÔ*L;;a UXd7ˌeCU,g!)R)55$r[WӍ\))-dvo> d#.3u˷ :Bit{D1)D+we| 6XtT?Ӷ]5z_k|-v'L]q/oS4WH!Qa|?Ojuad*ш87yMEs^Z:w ov=7&MAi_gգڌL5I bµ((Xҹ/#ۀ?|[&Zn7 a;eB$P@>uY,'b4ְi'r#fY]CNe Y NxVA{J{!rZDl J O˘XDlg'{)yԑ/l>!+rap'qJrڕ|hTؚZkMʏ3Px[W љx -пC۶q1c }Y鎩 9/,xkB6( qq˟IhӦc2 *P*1tڸ6 Kkҙ\m']拾9*+{։!RВE^hu*zq?X6HXඣS7KJZv?oU ouv4gL)M~L<7J#d6H:{_ve+lQdvxpԯ ;Ah [>HϳUN-- -xQx$3YSxB@{ r]r¸ T 3>2/Ξ0oelŠhu٢m iT*U-%\~e6\L>_Hd}"OZ Vw<;7"5ՌyLcܻKao5@{j XyKDff$V/ogјKDVAeJѽq-=[G;^Cu_'NXw}rLQ3v[ #[G4Z 6gId/wtal~,fXi)-.*bϻU]D!ֈtR8R}uCjK{Ve?ײG.E~$Z! o)QD\b]C,}L: H.sD'5XH)wSFjфt6 SA}b y30j܌WaSn R-oaJM5Ni3$rwNw) dHh, WaգXy(_q)ڢzeXij$wWai'5g$h\z-Un۱5_Z(]{wSD 얜) ܤ9}ٔ@-$,2 ;Q)JZ?_(Sf\p(ҁWrSY\5ag !&=oiu&h2:;dW|TKYվ8IL!IZG.=4glEgxM܀RR(d2IVj{+dqn7~O5!g)`<.22R#N!^:H ,"_W+-keC7F\K$gրwy?\Q#w>R^P66Ojq5%l(5e>J~g3^;([W xFcimoiwLp=RL{3m}sb `g1ޝ0sDSfl{ /#%Coրj5eD`Wd'%icdG=μRJO霸9gJ'ֱ+$(kY͑(_6?rgsC.툆%𷀡Eőb6x "Wzn'⏈L٫*P)\yk!xqOO*;/ ̴`YBJjR35RkP]7) r`nqRE㩣}GԀ!E.IdZZ ]uaH] mޞrԘ!3|)5ѐƄ+z*{мI5jJWNj/?_XôNl.O`BۘъhHou%woz6FĬNvA[%L~,#n=; "S&gDB+FGd܆}<-͋9WEFsۨțd&fhS3Ha9Ϟ;2Z6)u|T7̠`0EظpgG]\*Q5@UO+{Uʷy".ji_ 4W*^UpGNgr͑Z|y"8ZxX$6k_]Cl)zQ0ϫ. ]Q~l8Ѷ2k 9S>k>Y6/N2[}Lv#VR4 Q0)?v`8DQ/e#Ǩ؃jKXQz6zZq|[GyԌ%v'`Wտ^sjx[.}j|)f1өwy$f(KDB x'k,=c^H?mckjBka0$TH]L9::*&-n;% QQt'cmxPiZ C ^1_/9:A ܷy)DsTΥ{-;jjdlU򒩂-LUݿ㯭Se;Ffer_*w/Ġs+!ȑfx^sD1S4W[ ;avd ..z@8|_c;F=hw=u2q#R@H7*uAc2y4m!77D{mߘA 1|Zj+RBu[S4Tz%7[R`EKu30Rz!ew)3~_JPwfViMabP~h-7 ҦfNufcFQDɿsH[RmU yЦ_uT~\ͿFT@'<7l/~o.Dim8<  ,O,(>=oB ֚b303`m} NoW@fʺA[9C:nnS9i,;]I.?N*AFVɉ-/nh5.l{␡{I}Zg}60鹗XLHk;h+5sI \CW4R}Gj6f *;H'voL(dˤ]^~iɾEƮǤ~Ωfw8?؊ F:k!-đ jV8b~TD]fU)?ڼX+#M-MAX@6 Q>ۜ}Yfˋ9,e6^u4gze+= +ꊱ\]Jt*2ͱ2X REN;ǝC'լUpRf8eXQUOxxILCj?YRLzW_US@|`ieI#_zG./n"7ٖ?}0Sw6ֲ6_rIY@մ|,` qgN` (=9(?fe,^>{$?\:j%٪CCw5hUcAr&IF8(e=8mn IR%q7i}u )'ZO5s8uU-*W7ݰGbTӹxZ%OB/Ց:HІbJ$b96uPKzGdS͆Xzm̀K&cDjyEl{:FE7M $9(h]@cTiۗdA՞eQ*P+zYlL: ? O#*OܼP_mhiT."$T(m6U1$FG/? 
)&BnR9=ȕqFd[IrgD| fr\skY}W/hf1ZbdSVjII`D]r@x-x0&'zU gd3^Bʼn\m`mɡu[ m C>U'TrK`f bg1@={0ڰb ) TвBE0r2?a)Sg7aLRmL[3́[>(lX]߆Unȼ@k3ٖǣ(GNڲ8nN| J%6dˣzKiF"e"NOIuһ\|PutQ8کvDڒ%v P't&lۓ)h༉/)h:fWnԇV';4޲0/4*ҬUL{,r8#v>9{Yzgzd;XE^eZ{P-*H:c. u0lU+y#N Y>Wih1T~1;ݤ+ESqP'nk*vSIW bpBrE];G}SwqgƈC#ZTg\n2|^vOщpOam|ݎ$bR}nA|c/H4YXGR&WH$DȖDZ#IsZVI pa덡'ZdV,FfST'k̲`hH6rY9c yo43'Ǥ ^oat[㘽>DlBz9p. "? }~`Zr3GW`U_Q 5vg,sbrכp x1rf=b䴋GY5?&k`O 7?Z" sGDͿP\&fduZ syߙL;q/ XO~CQ6vAI32>ؿ%GkAs mSnƁ,#g7r3A0~yo'݁C-@>;|t7#R^/5IҡȨ_vYҩRB,aתk x~:Շ`ٽBr٫?c0Ux'7{(f\2 {-Xy\i&aݽ⁚լONBCJATeW Hy},K~uey YY` G! C=zD0op`$gc.Cˤid[[dtGъN?|g} d`a*aeRO@s,7t F??GhZ@>ؓ@u%JS8Dafgx퐱`dtxoxs(=>~3XeĶgMtn|T]D:UQrc%.ւDr5RHMraAI0) 7@_ޒ 5,YO= ɵ~ jxN<{&&gA]e8ӨX4ɩ5Ѓu~o wµ8@vxbXd?U'M8$9Mf6Ƀ`CN{ɣ jdf[ԛPMK$m@GŜM<llCԪ#9 >!&<( &&Nژc+.l< ]Kk|ޗ7qh)'ܩ۰MBe:\PR]ުdr"q6n}5 maw'<2j] 57in@Zj÷Hj+1#rv؉DF>ǭ%j|k2^  FDԹqC: DpYNɐ-%ѥv̈́[B! tiSyOgI|^hB#i'rC~3__fA})zm>"wȼr^h:^ -OqCOdkvuTRkLn}ȅj/8Q\4 ' LX1^~R,@?RodhmN ).6k]ȴ?`VQN+__eikb@+s4|⌟I>lƁF̽3,)!.aC3k8(=ty˾-oJXQP/LVzƗ(/A˭G37gLse QUm8ЃW'hu2*jP˦!|xZ~hx^WM1`+(%A\>Im@wkTUdFd"$z;˓wz){ߘJ_x+ ;/}V/E>K{CB] V=&BT_ŶPK&n4zMК#0$~RE醹7ohbJV}2JfM=A"Se-sG>~/ݥ368?+48yoo“`iثW}禼0DeLa@G8RpQpTوwxD'7|?&% >gHEFc %n|vqz1[i +rJZvtt99]賗G N彝(m}O YezLd6sbcqJe:#s#4i:-z=r`櫩ެzxJʀ˺~۠15*&#\ _&5)4e@WI;)V,l=ًvFA\y_qR)1#&JNsCM)nSKWC'[XT6yC@lHvGeX*ݑAl6Am67>-цF`Ǘ5]0R=\l| 4Y`̑re_ZW S]]bh:ֲb;i.1SR5A'b0 VP)tp5d kYj qGֺ&򵉫bk6o%3GxC)~Rn3:޼E <7W\Msgnm?y?UD4ɇ_Hʽ: `4bƛ@"Q J7 k#%.jԑRg7s A-IP{|3d&O_BδIڮ͂6Lht|s\*#Ǒ@\}mЩsnR29DTh6p7SX}$$R=3}ŷ@7TP!;=QxxlMk")ኲy7 /V3%oiwRRZZ-8Q =`cҘk)6$ u-m?6Jصlɡ*WZCy?:#ew9`pLOğ :u:ɻs T=clqz=U >cevXX6z>[r幢󌚢3Yl6N- U. \ ,ǭsg?M j#cndS)Am#O*∄F<}";Aav"[IO ǛqBՁ'+r`m ?reW+9i.f v6HD![9R!F:Qڿ<)4ZW="hVh>UᰂewB763,yA'Er@yXルjEih^$cK#Tuy:,s"mͲԭ{:A&%D. Mmzf)շ69'[+=Pvدi&m VSYRge77Pov'H4j/L A1`.re}[e|5-|yYd1O7^RT.j7es*ҧaq eF\RFukm @R~ĺ'"KRdkR3htlԨkVlSk߃͢T.j, ⭠_H7Nc!! KpniL!G`jW :E*^y0ѕs',dkF^!d)L-LbOt,q.@ISI tŖN1O?3Q>0pc;$: FM!H%)S| U95#O3960+:Jk@vJ4ж @f\.puwuӖ'0Eu{tr;j !"^Q~cf["|{*2?69By. 
yyTX pȼ< Ӻ۟WNIxב<]yaS p87@nz\PwpNP"vm2 zIOU+إ4/bnnãݥCz!/@ W4F>r׃O)eG^u3-jEQ[L`lS=SU ;QnQ+E=nc h}od!;-27h*t-N{MZ[~5' k],OMH5W_iR)j ֎0F =Ӯ\#&SOU[" wc/'s@KjUBVtRAuk.aΟr;cíK%'J:_Nݲ BQ|y9Z[L:O֬^AY^cQ։πWSH%S7+$82l בc ~^"'gʷL}+F05L0s9$`L0_$&w"{cSkMS!=C6}Ur^l z}; V /fМL2y!N$2dA8@d Y KN2jח0Ѱ_jvZ V5@WH!+кش5|\N!:!H),:7봔sXb0 }F}|1WoV'%  ͷ2?JxeM捎k%UzCJFx '0} $&pr Py 3(8ż1ljXC0`j2'֯#ه2(bv%x"iP@1BYaP5cvP/~ 87-:)] .BJ#nt0J5:LkU#6~0l2%' [ @l>3ߦr6F K4{K 5dCn6zǘØGW)O WP*![{ %t-z 8LǨ?VW:t9䓾oH4G-YLE "(`cn;+&0!̗qsiM!ReϺt\4GZvq@&n~GEhK݄5O&$@Ǧ' j iH}#znTwr8)0[f-;tNG*p@,e,hFkMyM#߮ 's.̌Y{~m=AH3=l6{g_1Ӝ RE=oYMA 99G%,XZ~4: @G%˾!nݤ3ʺֆTBu. Cй+i$ gi6|ZdOcL `n)pJywy`:JjT -{8Z{s/ ~ RKƮ*-PJAOYviM:A3Oplp>[Ϫ~f>}\mqK6kf( ޶_]B@ |,glmnet/data/PoissonExample.rda0000644000176200001440000023117114046050560016114 0ustar liggesusersBZh91AY&SYq3_E_}ϼZ}󷵚tK>ӽeByz물hEOƀL L4 TB4dS` h!M0L5Od2? 10FF 40 0M2da0&=IM&F$BLC 4Ad4Sbahɂ2d0&iL&LOFl4ɄSFF ѩ20M2a4ɂ`ɉ0&&&` xL&CLL`P~ #@ɡ24`h O 'Spဲw3z9k,)haBܢ)BB f Z %kkn_Y ;:l6n_>ϖl1< ٛ2jpcG&/;_h&d-AAw;̥ɳ @c Zے&"~ -Mdv7Q0L̄pOJ[ZtxpQ}:Y?DD ЮQ!u6`&aU @h0a`0m ށEk6#2&0ah6 ȆTS"&+NYX j3iʊ}b[v?e O*}6I`;X/AYW2i1(jS_׈EYnFL=[Bvw)/"鿡%;rR~YjpWpd>fZrk<5b:R9#vvwWxmczMO~Ƽ'^[Ȱ](|=꼾PJ(lIDV^8ahn%GvNeuk g߽U8-⪓:x`,T䀸0@+rh(}_K爫m ~1`d6D3}ǯ=V(t)͓(MN?]UL92/Ibq-J4aaH@E0 hZw`* fg$n>-?>HVkt\'i(}(kjARy6jxmB'㡝&,FF9A-t+vSM*3{,AoGWZvo]hl* A|A7>.Jhz:^. 
bFqw)<ȥ G, V/哋祑]JT>lcTy<q[_*&J#.A r <9en.ԂEcaO"  rC_LbAr`:d*7ҋp/MDnqf=v`a/56:@slܠ\60cIrp:x=A嶹|&?jD\J6zypwpQ ktnyiCR9lG*aܦQ>k= :mhr`Ot-Dמ;54}d&zh'β/a_V@mJ{;Q;1$o]16>D$a a=-P%86 &`xh8MyKԸ3 [2>,?6<ثe.^ DO)!YЂyAC{?-/'4ՌgkNr np0P ЭF+BhQm cf쮒t }TX2x4 yxcha JtqX "Wp=T\u$Y SULTs&iN,A d<В@*߹ߛYVe r<dzJ%&s=p c&tϥBmZ6V`:Lok%Û|B%h iӝ?f/˞ &FIbM?jC5GZYBL,H<ǏTԟ9EIb5 8GmĮ[ԍǗPJz$12ܳOdlK @3ho"H7MN J/iE wVUyɾSL'\.^J#s- @+ݟv!58vAf B-NIv7%xoZ-yQr2L!8WDħ'-jgGD7J?[eүh< OϓR=Q U=FTIV\aj\YWGSOP&0r Iu4ˠFB0g"{$V֖nbrd'zkj/Vx6[={(Htȇ%XY CҦ7_v[?DMc~9ev;+|MBgfm},WӑĉXsխ?3T͑]=ڢ,n..ϏA `& bh=0(.Ś%S/7LuX}z{ێ|MVfs-m^7}4klze i1&8glmIEeJ ShiAi4Ț Q+=q1yҚw܍D!8ky^)ss_ HWr(Qo{?c ]MtktI fܻv>3 7~|Bf)DXBJxq'ʚZX &i(+PBb_@@b((Eyinx{HM,xFvנoV_&ʳ3AEOӹ֘pt<P+ǝ"M?ɝ\7Ma1L? ?F;51e({Gԥ|3*>7#e/|USƎ]d+0f}C>#Kr[]=$WC{}&{56C2p^Y/iPӊdt+5WkTi? SF۝BV 䇹)oudu4vwM[ssԴhC`q%c/r&4ꪨ%M~H*%ؾ[!] Dt5+tm"{ַPu7ׂ,"ƙ..q=F1CyO{Z@w!!~o3[rIDGoA^t)(I:Ԣ^1A7(hH ?SN $FW&O |?L`Ie'5 EL@PVi/-߿6c3C+J%ReyY;Bԥp`S*C /dwqt9W;ƜUxB0&Om ڎq\vbxon` Į̸h"/6Uy?47@; qs5"^BMu@o~HFqfJ+92@:9c[:禮m">T-O2(<^o"zC~ڶfVx_4xO1ul:#qPZh.`ꬽB:I,M0 gdz 0469c5OR`"1HzgrMeE=8+Шz( 6 m7 1-UfKWUMz EG>mvh18r~-2ih =J8;Sq 2ӔL4:wrN?ba4Wb C Uxg #ӕ fHBa\^c!AO0꓃oU .ל"fl0Qc&6wJ"T^xЊ$ׂS>(oj 1Rٚ +2 *΃OVutBI׊qX3{4[Z@xTЬ% -:rrrZ,/fTG}/_Vgˉ@|D . Ro_^=\z{2geqv$p(-Ja h JϻC.?WI ҼpO_2골Ϣf^5%DGgΉO۫WoJw&OZVBW=i5`SYm̔g<\А0'Rv )[Ih/|#z9ܔ$rwrhmWD]2R[?- ?@6ϻ*}CkCӡBn;1ks0Q赝H#[C3;㹌$X{X~/[ZF&V@s'F?"/p~C}M,s;; Q'U0^sz[}{7:*A*c'҃bLNpw Nm,)2 `eS< ^n󊙫YYק " IyM&seLIw}~HY+t߱s]^7_g'@-~cbqh29]PT<L|LCܕZ 5Rk "JpȞxwfc:(Y{ZJ݌Z\;s7(k3e2g%l RJ CCP+XrMI(!A͘ZpL \/{=2<$"`CW_%ARO$ZyC K{Jb]u*]yיo8"̹2 \{ʣPO\o|`x( i9i e.8k1:<1$`E"2Ҥ 19nMYtv]'q'G Udר쇤Sz,w_{r`:P7@mM׫zk6 JW^n[U1[_A6e`"3j]%ɓI$#P]N.teB2b\̎S!uWΗw檸PΪ&S~ qt&|${] FHT(Z^+\2K3 3$EךU˟՞|$Y ?kMt ~]w'x6d+6>XyYLA]6?y()?uX?*gmxM*5 yuYGS[vy mܵ,4G33XW?4;i(^lLo+L! 
j|ڗL{7~ҼoNFf3UI |~i>hwQQxU/'ks{YGM+_:_nDގ>/UP+O~wqQ5/ifšU\aNln/HZShqRmvjI_DrnHuO0fv%LSMX?&@"1~TjғNP'a vw>rmlE%_hmI RRg=e%y ڳZ'ʯ+ " 'W{-idCKdlҕ { EDp TH1MAڑ@$L-GFz<& $Iꌂe>C7լbq(P֧2EE\;QpG#_,'"} dzN6S̜+N9CĊK m]Sڒx2vi雏 &wχRZotZ&+pCRٟ܂,~K{]jݕ R̼ͪD,dV$YYukǒ2i! k3lX]k(x_7*?Oc:epVͿ_]QG*2Jxԓ:peVi \"}9? 61aFc/NZ״ɿ.*Z8C_E5=fm#qdK-Re5[%|fńPcQ̵Z^GWZ{,0>ylJލ$}_RJI4 Y452Q{9W5>a OB$geg4K5_YSb6SuQERLÎ o#n2O͜6no:Zvڔhfyⳟ,L7b"E)w]ЮH1|U%)l(Cd6긻 ﺔ LP?e3Ύ{PHYI$I ^i_jAVpU!0Mk>^-&hd4aVCx/PMzMFlc,V<:V/Ll#.e(ԪוF~?} pb4y('4_Ƿp fBp׊ t ǐueZ(bO Qjr{OC^Xk̮b1pHX0 ,܁Ih牍sx˕7"b-j8Kd\p&hBы5& < eSW#X㪐<{U=IGqxVGV&9Faw򹤼Ħ+} #v)S 0!Xన<-YY.Űjnz= 75A$@@UG˒ݶy[&l'43I]nWAy6Q:bB8S"tⴧUL;LY2$n#RJWr9duψ * *%KVqv;gX>]t\O+ ͈BOxI89FKC^GXco/TXu7eNア=2=Dɔ8yBy/mOrκ6/mEA:%w#)[jPFR٬^F;`(P TtsB{e^z.gS>ނЀXwVn siSTl{{h"v#L]c0JEkIrY0ZWOfj5$%]n no!먼܆`lIocЗاދW "Cꅆ,V; }S#V: M5jƱRfadw.k≺mϱFʳjQձX/C9XԗfMIh7"NnˁK0:?aװH<"zir O˩R"hI2 fnaFcS@j @ #j7KT?zCXa9 𻅅 voE.RkX;0,-hN t>wI *ϱ=Y/9u=frv= FM'ܛ:şy}Bͪ(w!Gkb͚ S/ --vIw?ȟYS5Od \Q۟RȊ^tZm]79S;jJ|S:F/J8h8"pp+&>`?_aԵ^U3>Qyb\W(`L;{Gnu߁\w-% yRVb2/x#r& C˨m4/Y k{X(ݝ\)D4D:6G0fa].]V*^0Y$}5d Hu,Go09Uq>1@ܺht.$yNgܹ"/HUcP:Vy+VG ?yخvwt3-##[uEylk"ǡ7 yiH;=.o)QA|D1toeHr"|ޘϪo76+w>*8H2)U~5CńaXr]JCk3Ef۲#syR]$i EዻgQ"Iw!24 ]hܿM3 rKVH~86Ce@ZDSKޕW057ohMy94Sgm!Dy)[^g׃p=2*Q~؊ķs_=1W//* R=};<$Dp*3XuO=>Y6ԸDw,Rd>vтtՁSS/<ͨ G|EJsg7Gg%h`OmleF? dƏ k:2lL.u?*O|Ⳇ@~Z(b 4^^}rg_u* 4,ImޖPss^<|/bLoJQED_wE?fxv[e&OgÚ]- 봁H^j 1yûû]~p-z3}[m\Sh8e㽽yy޾|f*|}-7 &;}&O386j?TFo,)"L~ -5(3 O+"͍t9jHT6!T_4="ֿ{gޖ[yA|s~ s_iyR݃dO NayX @؈7ߺ?p+nXEۚ ye})# h+[9I_ -яubHF 2XЬM1ZUk> qd.sL9pk*$v9QڡX 6wimQUT cB&5g\^o ֹwz1!v>*Q}˲Emlk"y.P\s"! 
->Oe7 ʳ fR"mzČjwÏ+"le`ZZ^#\HG`I+ zaFחa=D;:\1Օ0=uwpir9Kcf?y^ovНO9!e|b!S%2;~ s%Uٔ7&u P}Zu($Nc֭~0~a|6YC]2#`:ڤs&?+Yɼ 23ePdG:֣FՂf˖m`9dEhP Omw6}gJ.OQ E0Bģe vrxzMn1j ʔ02I2zVyuoIs L_]" Tg Yi[dcTPw<ݗ{57eq|ANC*b Zu},UrN Qc>Ry"=V}'JBBYf<3Ȅ ӾrB*5[h)ݵ~$t[VP^T9JSڳ\wk *;qW`ϔ>G50]vIM{(`Wג@N_jm;f1s ΄׌Eq5UϺuur$,|H6/hG  wJ>iT[uOy%" Ӑ7"vYt#0o4eŏ~Zc &A>E˟0Z tY~HKdNP_ȏTiO9&G iy|qC#wL=5 SX4KrZ٬-E3)$)1W" 'Y!rYpFk= BV BM[ǷPZ{!6TQ#cC؁@ߛV!2RWK׋:YkW{$"|7ջ*).)Y^"+gUTJdc f/d!3wޯ6Rn1~e\&EC=kqft*VWeIO*}P2tY& I.]RJSWCe6DTLst]IL][ xkdwm00ZKڨqثl`(,N6ZC,dJzZkTmZnC(T1oaNϕ;m΋q93o܄5w~.i *:T=\{mJ '~R!#/-NqfU0M(TP *4ӗS[u)єȐ .ԧ6[wPn¡+W9@iQ |pWgBMulueC-n c{WIeť_aᵞӲHw%۱t;*]=jI-w*)E]`H\6_Z np7jn^?ê'&70>#.xGi`fM:mNAGg'Ij@#0VlPĖRY[si5N=Y)a6Q1:WB 3mq?vb@;-WF9 dWyor~h10n _m :X'EP+yK'!92O8<i%mW5͆ G|v_)7rݮCQǹᴗi3gE{'GZ_ r%P Cxbi"lyMϝ}#ϯu>ܦ/P`ws1j,6l/վ~Xw6c/O# tpe..v.!ǭ-i&O͕Tb}hhXDjKˤ8/Gi(H Z> ේX\Wo-yn.(^Q*ksh\n6ԅ\| қT16h+3CVU ښYP%mˆhg t*F7jF@F6=;bq=2cJƻ`+,W_,*uY?K>ՁCK/~%޲㤲7ȵ;YBt3Om9]eh^[H')Ѩ鹣Y,NM;B%2Y ؾ<ƹdܚ~,gqd n*cp(*i)8]Z8w-0KYgT;qJ?ַ4﷫B:dtr8>4CUcwK#IH`83Qsh-\ "I(kժ'1)=}lDRO1JՍZQ 0;x)"ߏQ6~uTUT¨4^+wpsilϖ_)8o'*>V ŕ|x 㣯! GZ2kz{B%ЦUHEBE&]@ j<uaTUpN#+2Q^]9k&RKRT '~qTW-F~E! 4lxLޝ,rLDHY9gbmuS>; 3|0>ɱ:o3*͌GMHѠ ʉaؔcV<]n拖ŊO_MoK9(Tw5Hi^7]eAUYCp^շAFA02dR-(qRG\d˱ς0]uwa?k 0? { &T ,ZΥt\nj_[_u\7tolk0*3VRmΧ++𘑶58c4RUk~? Z)`U5党EyupZ%^{#ڠ9}e+_>&==|#>n .?\ayuۅw1t@_'7ESc{=cIWu.s1<^{Ws2 ])FsƯ3?=5Joud5MN9_G:.^Nh= ;5-KD_2 ބųK|6KFdqL|9LI q˜rn2nz(y%]€W' Pn`Rs7ٯxLԃJ̱Ԓ#jBq6Ƴ@yY\qSs3L o@6\闌E紶tUrN/'}8l$tzbMN?Uj3 ^|Te ?3Fo`r[zS)̬315M\.) 
{r!8EA1#(#rLI+gxkgcm_JNs8072M k*+'!QxcS=ɷ-7G>g̞Vz|J-aړ>UCauxO;uʵW)-e+8ЧCpcCUc/A۾x^AEcf`OT4@Y2'-խwQ_P{BY)Wb4FyPܔ wwӪ_(Hf"B1vA_V3tL!$&Zr⃦{\JS\*g·n'k5u D`5co+xwăv?jIST?@.UC;ZQdjݕvyL=N\vBWrN4"vXN$i>lHš1|zrIH9yړ ) K\5aj?KgNE&7C>K$Hqy*}kT~ l)눝s( ɩ;YvmW枏XDş H%2s= =C \dZ gK4(4I>phtpqgz 6q4'(G78h$ DT 2ϙx?wC=LbPfjrMM*y t+ϿJj0EOXo`x=:ou xYUj&^_KWUcwM4h€#s:"sQ0g2E 0C%5NgЪlW$7C/$|×_J.]ZU-z)e}$,)}8"dS@؎0Zik']{& ]VB3`qhCi~nw|Bmh5k}NɈ@Ѷ{©E*I>\Twߡ9`Zkz|r&LRY0z̒;)',:h:(+Z"iS∎l{Z,O*|+Ǟj=Jr7}ZXV"VLiTz̹Ukv4_ }_{)V~YG%W_A-؎YJ|z2ÈIhT-4dKZqJ7wKN=ڹQݻRsQcsj s,@ "7 CgϤ`A|z&4ل6wXyJƗSmInq4Z A\KG%(r> MsYߍw}\?бԞĆ#t2</eg[UV>yA/B3kItCkIq7iPWi-ѷa!_/EKk,(m3R}>oqŋH :T"_+?\yؚGw7Xq!!v"ƾu}jA xny>}K4$Xj(PkrH{|0ؠܝz>-;T{%aY53O`qF=A]b}z,Qu5D3\YeD6i\Mwx|~sHl#9cI,fDǣ/[2JZ庹x}rf0%p7i2UoNғW^`n"( q`:|(-X nZogV|(ҶhZuĭv#:wZgtSR؍%9HڵjGV}@_7׵f28bD %|j6ku?Z?+[Ǫ'E47-4H ғ[嘉YK5Bt%9>9 *i2>UApT#_p0`t|yEqjK.Zh?KǸ}d=`2[~QUKKgY[}g8aM=.ӯ)I9 juhBTMCB%T6+d@f-(뗬6O:'+2T0jQ]CLi*)y.9XzK|{=8=}tLQVyd6!!`xH!RA" ˸'0Ui$oљٰG̻2ÞFz< FǸeJ`0phv,C/ XK)-ydbK~8:OCp[/)j_> Tg2OU*]rŅ݇-8?K)^ -¥c5u9[U?NH+T")IPGB'wUk 2q" x8:%r(rW/2iPZgeonݮ1‡|*r;^LS}:edbmjWW}4Q L9&B%*o)u~6QGI{/w:+z!HEEP돏hXZz+۴NR YA?&iwo#V9j&lې=:0oR0(⨪61uȜVZ܇pb0g1ʷ)Kvd1Tӹmv<#4xMU_b4Z%F) `[ps0{Pj~ um^Į&H0v+i̇m2t d8l@3$$^_˟5X:WE5k:dA͝pP"Ec^_9¹rmF9ACgQ{>wjEao<%Jj)EPfoSkY[P MOIdΗM,0llMi~^D`yZG/r3+E4_[2sΕS=L){]AT}e#d ؑe)e*m .b411MWƟP,*ŨjT);\cK͢⍪mmu=N続^وW_%Q9[Q!q>4_m1߹e]P=_4 mGwOb,f$^g!ӿ$a}q1g\|G( 9tC_kb:.ϘC;f_zhOuU`pSo2 >q(Yz/gh )mW 93Ι9mR9 bi=L(U>5 bW`̮!`^#Ӱ>* N#lJ;X%Mb+0?lLtIiM%X3~0Ogc(oŠU)7[<ʵqzE)4Lo=yOBM ǫ>3NW INj7ã IA102z(w~|jĻHX;` wc Jv4I|on EUTv_d:oQV^nE܌eD͔T8hV=P6^I9]'fG9q`10_+w ovXڊ.%ldrzj,Q(KVS ˴N{4g6xfi"z'$`FWP"E"vj\*w2ű- f$k 2f߭r/.Rz[\o+p`]3 Q)w2ya̼Nx1-h c^eDĩ{yU&KwM¤ftc 'T騬[,1 ?;>l;7"HwdKwLLVg$Lt `7CM\ÛCh_Zw9qph4!2`jՐ., <\cߓ{DW"z{cRJa &$7v;0#P!#0s1G#MgJo ;_|_h`H$yX4FwRM8Υ2~5@5cpܪ#T`RG ^ȹxwԓƆQej:88,ȯ]!$ی}lq\y`'K?\9:5Jl;WKrDݿ R8 . 
69n;8ZҔNz܅2 ciT;/n>7be [EOj+ w$ud du0.n0>jQ*`Fྫ]{hEGC?'9z/`?ސ,hZo[x1.Y|HFg2q.OfH m{.lЊ@zrT5eTZ贃A勒' ja ~ea7**q :qԊ;θ('&WOʣG/˭uMl(;evhKRꞇyNyN+Қ]GmYK~WBLܙ~/M]ž4sZcˈ԰O toBc/wuIoLne㬔'xF Ahwv俤SʠS&*nLEMU̿ Z-0%X?xķ9Hf[ b<8w-ߎB@:.W o1C&pCodKp2*P ^,g%8]w=LHjC`'C*~9 ¤?$UՊGu ?33vyh &ezy!<_ p1.TۺMwПvh?wV9B-QIj0(I_9IG?Ь@R=<<~DSI -X@~,.'R^geϙk"GF.:h.*k{5<]`̗'pvd;\`QvCic"_]ůEA{+D]wXQ:}Gg!ďOzTevw0u!+DV* ϐҳ$n $+>Kȶ)%WGgnVDKo+g$S>Y@I?zAMa|Iǹ0I0ZU٢E6H٢qGAXٍN's^a(*.NT;=`s@PEg_O[e14+Pv/%12֯J\`o[J8A炀u@o͔_q@¹{Z,*+Z9Βg#RjԊ.yOjxQNbo#;?s*V[嬀.C.Q4\b ?Xs 7 61pDLjMb2e,YRu'XWUgIL"߿yT!k7>[86?`Z?ҪrSbnf_ԥׂZb~Rm*#wY'/?}&7 ;^SK zEVrδ|kb*36V9s\*]M[nV;}Q.dR=%@xgH:7xvR.ji*-"f]?v5̶]cLϻC™U詀3LRfč+}*(%>k ,Rr⅑Pvއ1 c:hֹET=%zn0>ef>TSt ZMr78:_l덷,]B XL,6 kU^|Kj`LVWtěp'15ϑ23c,LJm'Bn7$qtTqsI.7ːN5un 9Zӧ9x!z~'Xc -oj+5M]lL<&gZa:})}J;L'9fٽ ӦVja*FD^?>7[,S+Q1t7L7z6"Ȼ[k-MuFWK}W Uݠ mt;*wۋhuzBāY34I!#&ˢ~uý a-b{yRJnmOIr\_\Z,wU^&9wp z Vs9S'JΘ.D2Ll@/! g&qSvi8jfx G ʍIwDGo#lv?3 Mte8!TDzR^s~Db0wc؎7T066 xTElg jo2+ã?޴~y)Z'7B粞т3XqL.jOûUH(t}}έ74̡ zx\|(Y1x= h:!q~<Ǎpvm"V|5Q𑇜Z+5d,*3MmC;0.ͼR[JuQ{XY|nBMmf׳dg^LCJ .KBE)a ǙJa *Ƽ^#fRtz3YCTWr("*8Иu8L/ao;tiܡo55o!-pM5%ׅQh-{GÝvN:4Q49kQx419Dok6|˟[Ѫ7'rV4&QZDXzoǂ^I=A&V/iM6ύ#006y(`[`VdcR_g>IKLr&7vYLt"68s?+GA݈кJ8v*#fzHjaW\QA .=2dhbFXj.o%ye2;)elnU4-!kK,o{,6 >]iceyO!~\?X3 $k_`}Ɣ.w9?՜"N0TH #W1[serVVm޷9X)>0}`Vmn 6i:~kq+ou}tK݉VxQW1eϷV+qNy߃]i࿤@/7}IUW&)Yqg ,6W@;l)e(][Â]ƑL95xsYQեX1ٓo矾FDἘL&J>oZW8rf ~c:d)]{`]p3hۤQGFv&4PHu4pu=]/Ko 1zkf4*H4YW'jmr~> >}ܝf^{g_HvG P|-BFbF{^Rg:Nǜ#TS}T )2uÞq :DHvùR8[SwW 17o8Y3 kf|8`i[XxT%rb-+Qy3sۮҐ:I牍QBRlYܫT':C#k0wiw:lQ^ HʝO`ӔqWKpISmՐbz+[P9 YIK-1yi!6K!p!$QϬ,*P fO} afj'{93r]S|>_zFpŶN־ 衢d HeR{wdF| (cSÁѴL-G..qqa?S0%O.i iHCw :gx*FK1bbd+^!]e1)%':7X4UmOR$N& 8| C*_ -, H%iX xV.D`?1c۫uNj p95&:p GgemEZ¬Ԥ2}c~Կ%H!&H{6p{R"č\ -7GO5fu;(Աp l=Ws{se CWЗU/<Όc+%,P鍏Q]-Oant|ZMul0 !.4d|zX+u)d+A.İ!iIE!7IA{K: -_"lݧ1t*ΧK BhCs*NG ?y Q8ӈס~ρ~ȝ '֋ Z4E;sh֣,TLoER$z9?z\m/ka'Isb˜> %|(^vڜFگ75f+[ =zhk)2k.mχoѭ-ByLKD8| wyn~&4&.9~|ԉ$Jguay.TvKEwoO//Mlb YNJ}g"D3o>'Gr)a HC>p,'l$A82 ZZ)21O7fMS(/M+N2gi;g,([Iñ 
@Q+H'Ů<17o#rcx/0cBNh A#5e{͵Ol2>PVNtMoue~nU:`e̅e2Ր=7L/\ 4$rr8-=ki,/'kB-#g=:F@߯foyjS4x;Vg66 楓aFPp'$R~fk˃Mq1F`#[1) Y: >;5o1iuCe:GD>* !Jsw>ua#K!< L`ϢΈ&Z@tR bQL }MՒ %1$D}M_~;ritF+IjZUjvFgQZH7X,Jhmīc˓V3 YI|R^6<׍d ESVi\F| ql1b4Wh*b+3)`Dedˋ۪?F`]D*HOY?阳 ${+$r6ȧiW!橗]kڗ,cKoŐ6G=Sy%E\ 짅ա[J3Ӳ|2rƧfO=IHQAhjiv攺hNy4h)ԛ@pV/7Q΍. UˊXGvbɑ |?>?E7G #:u]oUcm[Lqn >T 1=K//;T{-B Q뽷N$BUl_ b(zf7_"]Z1t7;ŏI5C &ƥ>>O|4_gy5^EۅD_BtRw4G ;lM+`X-|Fc]?47m`^2_j8 w6^I`9,=Al{VZ6*ujW D=^2wpXV>P]VMљq͡tŞ-}DmWd p3Q5^A'P γw,F30k"ZHؐDY7"c3_< ;<9 ]vHh"^QoOK;:lym@VRJG󍪏lVçeW=[1}͙yVl@5#nԦ)7jX3ͪR X! 2}_z7م/&1`Ť[$ f?%sR n_˕x 9Oިx׼Y՗peɦ\C0ĒA) M,M~nR -%˹]1QGLZس#5~~OeW>/{ Ow߸BMц N{ U盧PO_YK;):6&gu쇉#ZE(@H^M7a|ܾ.LnS+O * IPl'7^aU[&y'/ 2/ bƖ/x ^#WM9]+$QnVZ˾fgi?|߶d'6In7[Qv5;Ḩ'FIdp}MVܨzL4A"rPME6vHRg7#: :}sm&RT5yn0A%v N.q ٲ79+ Y4xur^nF#m.RX2OtⰧTY!oVfd C`gIg늖$~l4erP>h>4f3&&@Ä!SiqeWhn0c?v} * G"*5Y2E0.G:8b$O3n'AELq}<ʇ/z|X|+omq=@9eΥTHUTuksC8 t̮z-C߶.Ojw[NۀC}2 . et Є#-idyA9xrϓmwK[{em'FZ%\+M-wEFT>8`Lx=fVY_>U6Ƚv`A!C .61n7WKkYKb^Nsyrِ#^ݹ#07 g逜A"W1de{[%A18FݺO2vLb+]ep.<7|= OǒC/]!Nc;mcS VFDNʨCZr_'AS5!^Byj,WT*L.Vvj]1B{>qdoROgl])7»A&MsLt43+Vl;c} ]!C&cx13Ra0v=O$"QW&] Ģ"^eg(zw yqMATW {5!S˞[՝_]maC_/Q7kMLANl-2FF/H\y#L>d}D6]Crrfi53ԓ4G?`&t({ \v=elSi5\$ ~ Ã63u Zo/?j+U>vߓ6N Rvk[ZzqaD?c *^K:rt=bׯ-/|q IR%BMHޗ9x`%\ߌD 3jqU %n4Lz^a*tέT:L{p&rx857FȮ{;%^/ *PH֓]b=<;%Darj~fvzeKVAE4ӟ5MmVX>M6xqx2G?t|^Q Nx .ty'#kN˕ykɻVmy2' Pk D{!Rܽس\وߍMH%^EW=Ok#ՒMHW;4?׬!g&C9a C^J`KC$(_.׸&MN^f|r۩J ۼLz[Aq/(ЛXJ7rYt%~bIw!^P W}Iٚ/jk>*D$QRuca,3Ÿs%3]$Z,3^5D粦E9zΟ/i" в,BeHozTF?6#3z>a6hR5Lq{%y5^Qrce+W3.@R뾇/)d2ԓEj+}E97kx*e)0r>wq>jz4+y`Ҝ3( =u^A`~{Z$bwr7׵#&4ehErݖyfyX{m~ 7f~Lv!CUdg+#N@x۞͚~> )W#)D?آ) *E4(99JLg2e\ڊ#rj>P5Ѷ~=נ H}~aC@|y q / /HoE5Z.Ȭ͢Xgȸ5JZ. 
>O@\q5v[爦Y)>HtHHEhaլ$'ذЅt=snU}!=h.ЍIT {Ԯr:+Eb#HQ FcVsQ\M% B2egQѹugWg#k~~B%(v 8ݦgR6G""TFOi%ֶ@Ք,ڦ!hLrf6.DٷC [j}vJb1et;'.E&,&L1,dOw}Lݡb_ \ M.#^QlN!=v5ǒhi|>S:۷οDLYKc (z&0/kQAZc%:F; b9JaZVU; N:~l't47bqsKj)mUE>+Jw*x/^ =ְ"};۷*r'yC`hѳJ| ,h <$AztWSU?c/6X:w`-R3  ffZ17o]ܧ:,Q; (8w/?Gb]2~(cLc7 Bȹi#E`pg}𸗿i*TJ;gob i%8Lj \{3)΅n 7ia}w;w M9-Y V: y=i*e0GnD*iYpY[(iQCc;~qC#͔>wa(q1'=ͻ[(˪Oy[)XLnդ;قPnB ʋ,Z 9S+dLJe>rnQvJ~rVhFa|kQVe"QڒC ѕW{45e 2s/>O9)ei mpiò:M8DaS`8Ou$:kk=R+=Z+~  ( l; H AwF1 l6P{{PRAqbgݹKB2ܑ 1Hr`sJ xoO(g{$2k>F%e`p xȉ\V̅,|1Yo&4=?`Qyyx*fU+T c Pbt57i s M(S2%MzK \C'O'#3s2p0Dh? NN/HcL~\❴ѾYqt\d ycH;<[B@ʳK%Uum.#҇PV;>:)\S Zyԟv8NG;w3%5hO /At n: Y,Xch"d*tbbd8Uu[,yf\WB2 cOlᡅx>H델a["n4m7 >9+87= 4`7ʼn~u0*}n;O&(Z7s%!V])~LȌ jı+ģ+*lqI__G4`78\5Y8yHQQT\9½׳!<yv:U >3fK깨Adl-Y52Qnjl_ٞEiǨI52ւbqA'oQ;;TQ`Ē*JTO޶, =dmS#m"XèIwՏQl)!̱)OvrRsI9G^w/\(9J|Ʋ(kȹeS K؆'!{BSF+Txp:I/f/}}R֨ IHsV$į?ΙꜸR4~-"seΠއl_tLm)oMΘ9{7 ^V;߿f᪳Ēg^ʴ2ڬLF^ީb4_`dR89G8֕ڎxTG9_V3T0) C^~$:["s AKԶD87z^h:ǎ?(?ٞt7$[R-,-)J7!*}KOHگaY"Yz(z'c>YLFeJA hOȂ(fW hd64O 1B*Y' 85 GywCQ^=;+w%,ic |bdjsj/棺Px5=s zW_gu5E%X* WUeC ѳ0(OW 9^J"a7n$,Z%22^;hYޢӒ)`J 8_>b.$akpOk&P6,">?צ$ M W# VЅtp3!pޞ~$_rB-#>ϯޣtrk!YRg9K7:'St5%ږM_ TQdrNfU~D1.+rg֚Sε;$ߖCS'%z };_8Hw ^p+Rwcs/8 R {eG^{EzqR#PeRli[PKUK:z}џƪ#`FtQYJ~[j >JF&YY7`q[E %N/7=.U=:^E/`DKڜ?,44}оh Tҷa*mfUQ7>\gTш@*WAKCޏS '7znp nSV\U +t%A/%GD}&bfοHM6RZ}=1':6Dr>j(zJ^LP 68xV C~!ˡFW= c)I J:UR95ZVQPY)~k3C##0n([i9Iٵ0kCb.~ZHNo&q)4a+I }-diU(?g0<޼Y??+kz[G,ֺ@> 8C@3e&"e?V8hYii {/e{J*[әֽEi[-D:^ `!+TgBLw,c w?؊kDؙmPCVKLVnVȬ5!nP;rhrh=kEq\as~;`WCQ'gYȳ'*F!7̿ɖͣ# {éL0LQE@&}(ޥT8-F_rʊ>m7A}N6]-f'ga!Z6ZKA,B1sJV~|"j50HesZ ` *7ϾY$ſ\ce 5Wh "8$I ŽP n>2zQL4P̷ k%tqcDa u(-%$&Py|׼BSP&eI-;I|3HG{a:q甩q1', -/U6+ TQ@4X#iOŪ16W#kJ:Tf#f.si_;3]O;W\Îy/E~3z5h 3|[T㕜l,#H¯:r$P;4ys7] zM }`K">q}Hm;;z{/]-h89Qm.?fďo 2s=dAESڨ<g*#mZP/dSILAbL_-m19 2QoGs!ϟAov]z1ťF{'!wiӇrtluS4Es榫=tZF/߀g0*tV`Ake)5{/蘔[(.9&RwvZzT_gR%ZUL_4KL[SobO.IV||=&d&6BzO ȎZ0)ާ YnOCk /@^(H7Pv7NUW =`2Y>{<а>ZăWGbp+ 2I˩ C5m15tȉxMܩ8w`z봂Ϗ !B B c&KYl:8L9ʹja ?_U7߿j8K0[@Alw,ZD3+@Gzaߣ\S)7fԆv Ow:+ 
6JlG~w5*-md&8u+V^+N\!#|4>9 Yk mlO^}_/&@ e"v}]HD4P TLlHvX:w-Kwȶ-KD)]'E 'C؉/X_cuRGDZ;V-qh4.`P="z1.OَrcmC3vC^kƾK]8@wK |ʦO]#\@y}%m":oOTu%jN}؎ қC9@N d2hצg5md,;ğz)~q:JCPWe&:+UevhR\JH}?l rwv9ރmL0TSIrriA|}|9E?C4VPoۏȯ$\wIr.wBa a orWs_ish,[Tε__o¹)2{5Mke4~ė`6Afaۣ<[dqfIS08 a|\8ۙ2=a#ui6jї&x<>kE}<l EL;U<n.~/_-YZ' 2!Y˘wf6p3}զ! yMpPhiOJ4oz84vw\JftOk>ӯ2öVմaX>$U-w78\̙ l"]Bž= Egk-Hy# &vlng ޤܸv ?:=I:shT| kb-(;,A'<2КNͽ ?t9vcTӛ59N374@t۴n /#3\-pJn&~O:Z 4-GyE(hKy},aۚ/L.zƺC `9V6>,ORڀ%j <q:w;ڴNĻ/:X+*F؆Bj[䱃=VӠtM}QLǷ")nٶ5{*X-tS8YY7 j'M[`oCxχUz ]ͿVBE^? N`|o3343S'Nq&{ҧ`6Er-aD6gczIQpu ҵjG8=_jg]>"哕 RI5Km8@{UVdtgE>ޥޕyoʦdw> \r݂n7UӖhu?+ΒR Cw I_YAXsxpQ9\h48J,_-Xnd3R|}<'B)3u< ;m󻓞` ϡIHRKȗ[-]l*(T;RK5s{kr*\5J?ޫJj12w:qwKWTU@6ZJh5Q:p-iw!|-`LVL'݇SV'(mu#~ i s01um%rzu֋>Zf\!dI* `݅awލƦLq3Ђpyr (z\\XưOκbQ{9[G ,3&"b Յ;so[y*-mbg7r`61q:@RPfp!qOI1TWV&aoЁnM t ēk&XU.gr;n +Sxdܲ hܟQD-ULg%3]P'nVsDon2us-jQSqU*+sMt} "D劥u`AQtʖD(^u,$C-s?R3ԫ0mmѼgսNZrA_Zmz<נΨ|K0H_L u֜C_%ozVuXhr ay E|_ʬ%;vkIUH+Bi/ɻlk-h/UlyҲ3Jg/oC]iDZ `irէ Cs׊M~<TPq'9ޓא|}yBeChUJDnY <7d]ߎ;p҉g<Ab gqgw&C|TV שHMRREShGi%ףcr=6OnFoЈ +4@-2a>nL9"= v&|]O~|*([jkUu.=U +j{0t.\W⯖̬Ӏ,pPED.){hyO:myw6b8 aGjPhϛ2f{7Zu" W>\-yKټՠƨ &3b<7䇡fxo[Wb UEB;5s|}ji{ɖ:?;c=@O_l]lu59̗ӳ(!NǨDc[zQaСakt!SMA?vulI-ybO62 ݺuL߾BcTw;X>9Hբ2v|+.f ߙ>2d#:_DgrsJ%>̉|*rݟѝG/:фS1!F+ƤW-UPHFL?54X|&.Q7^kD:9hb~( 7k:_?n>!siGMdQv9rcHe]N4%}7,KVEcjGܵBwx>%}Jjs03it D5h#%"mfKFϯ FI~/q.i Hs7\6ҿn"e>L\uI1m3"¶Y mp|>oO+'tGh#?.d"f@Fӟgdvvb[[vpcpxaHpVu~5dJ@^+ѠZS~e!,7+y,lT$Ooۛ#.ܵ3ʑſ굇TOy@J6eC<1]/8vsI;|5 Od-E7$lPڸ)WO#ҭ%_=ɨfs[Fzgi1&Ѕ^΃r4Z9yi&7 8hE!@k,F-9k${]$ۙų6&p|3Ibprs)Ӝ)g&׷\q6۷oAG1O<ʹ#lʘf@`6>(y35OģMQC"XŇLs`Z@:5ojqHd1AFI-Vw[2mHL,W3;|~a~Ӊ$bu^Tk,& _'[[>)rlNΞʭRYhͿ>{ćSX Xc)|{79+oնfW_*='fQ'v5y/3Y؇{*7O/6봂n,!f݉;Sp^cP0,Kfb[ RՀ ㉰*kUM#e05R=[:L=>JrKOPOxմǣ!;NwaP5hUA3˗z.K)]NVb[TlDOCFHf|+MaB S#]^mhwia [yboPh\Kyなa ;R O'B \/\vܚ@rvbfF֧[z J9OKzQ-{H>~gox;b- :`ZM 8*luW> x)/ʚ b72k09Ik^B}*D6_̚Ive#X3[oU8YQ[W%G"(|xaӏWcPBKmܘ/Nv<I"qd~ӶTܫ.񬜋oDkeƧ#G3bq$y#)ttRA ؙb5c||eso$4/y 7RؗOc$vW\fDK3[a(dG sX+ܴjr\J(n5UTC:V(=`i KTu+  {!^~譝ߟxDY$c2/Cm/Ji|D[T3 ; ] 
3n]h$g(:J1"6wTK!y(} &投L;RTZuiېˤ|YL?E;B k=:J0] )A)S1&>B!A۶2Zٿ)r 6Fa@u.y$d h=/&LJ a."Dj%yyoX* ]1>*˒]7x% +vKj= l;Xf )2oJ]Pb IB8lCNK^*Ī{1UޓR'=XOD[!H}"\0;8O^XX:q(_8&)DVQr=fʼrOTE/^AB7$otQ'`'@ @  0}-Ӂ 6[E&RA2 ĥgmH@%*H(Z@*8օBGDx%%똦B"s{zn_I!5a/_K8Q ]dS f,qXV-֣◖I]f1l k~4N}%]f5:x{ ht]L vloh䑴k$Ob݉D7&\&]i/}bb=jk6Xv`%ŀ?3ʡSg 8Y ̌qQq ΜNZ&̴N=\MCa(́Bb|CH2{R3)?(hjp0y*>63_>4^HK7T[=xܠ ϴ=t.RZOs胢5!7/~T|YO3M:x0}9|Ϙ`xEJ*ħfQJSQu!0ID?k Pb*TPl:я"d<` 5ں|ҍ z@$r_uk-EP*rf3Dj꒔!xTp!X([/-L'jYW8n nMMG_\,TRtgV["$2h: pEuL ָ t R ~#*@9n Z4Z8[,D luH`D6wᝁ..pZ*_KL`m Rʘi|4|ZK\֓GۤH !R>&Sl =,i4!գOprtbTYm+J# 4ƇN>{m%rk|da"heV;סrucڮҍ~bHX6*Η%EJTBhM8Z4ij7SDUS)A4@M Paσ;VUOS?# ""6o<<-٩8XDQOgdg q=t~`V|\eZPPI3 XVڬϾVaýb&/7anP`*$uN¤V_xƕuQ9 +'8I?G[ SZl͢1t2I9 f|6 'J2%5WI8\JEAvwW#zX0mz"yI(K(4F Cro51ykvJ2 ƨld놨0 )uW |Td>!8ӕMJB6C.ƻ|x\MR(=r{Rt`Kӗۇ\#;꾛āBs@#eA5eZQPȮ0k#&pNLCC6N.֐Eym[';Q{7qyF']Y1rm=RY~'h-=,WNlbCaNXSyRb\,v ~@I 07ӀCHlL3sVkVQc󖤳9KaT``{anC-!#t'l.aT]=w 5s85Mo DOO H9wp ER jnr_/s!E8KlS!E(9/;(Qf!?29gծ ӊZuKfԔ }(`ς;Q&NURrYCM 2/F80PDvH_D21<IS:IQtƵل3 Tm#:ы=) <O_>+8<='GBb,OgWo҄!#t{?jcږQQ J𫨞ntTK@u|O\]ǍxbCW)bjo^1jý I |5BKzX%-AKU>hGcgb?{amOQ ۬c£wի?O) ̆[[>fF,54zݖI-^4_CŹj>JT{v?#I$[tGRmѤb~$d4$G|krx])O(FhkkǨғ0'3g#ҧ* zfBpY@:6ϲ,K ֢Õϖ.()7 d"WdC-+#%fϪd} FJז]"guG+rq6l>hvsgy"͌dNYo'ǯ6l @0|,lG#MHsaxN$z-ODY^ߏnņ.:XꍴƺVXdn-BKtݨhvpTSm$P?Fm1 KJ?K~KDLwҟ@-$5$q8m:sמ~"؝c zUw"sg| rZ=7m^hyu=%O]J _FD5V^Ł-Xݓ )3<gG`V~<.ڎ9i硁? 
w|f9#taUM_uXvAwn/P /A`Ttԗ @53 \d8)8p\x:SR )<[d1k5's܏/XҸB7+O31mJ"fG<il][Tqn?4& =j9 #`v܉`_jCNJDqV~.)bH=7yy8Fш \2>˯{bfOBg}v07CVWp^a ×doo9[C-)Sd8{y8[sMH[s,t`)oRiR;Y樀Bz~h[0LZ.,(m ^x#^ok/YX{'=Ovi97F6DsKi3> Wo?]t JWTIhQU?u{.a[5ЬxNmM7j*x17v[WwQj?~-?ܞX/$X tРי-.Z)~gkA@%%߇l 8PU1m;g]aKPq +4fEt,ӫھZWx ъM_C*)PU){siuG R:u?d1phh:0 aשv/l~C^k|Y < ?,"_Gv_rfe3GR*ՁJJTArKhy$=Xڢl&r"Dθ;!νc|?ړ9vK$ozf)e.2:TSgrqm`X~Ԍs?4*iFQ{r6; g{l,͌y]=5qEAt^9I~bFg 7FbS#kJ1C_KDϲ I=лbZ#O]"M"c/U&$vG#Y7}UʺIpCe qn= *^DOR%Ck@P~']Ԋ^; u;zY+])b Qӟhvt{ry"GJsI>۽pwC+"<7"ѿ=-L (7R* e~*&m)!ҮA, CK2M:F賳kȹ0q l9pG|QPF:421{0hְk&)Iw"R9Y ƫe|h^]\奸 n7@,gk{SC>`i`Ǵ i҇`I7?!8YNd!'edKa`4˩%$5kFJLy«{ѱxz>wcTRx;dιثu-ceMycBă~,o(:y"t ]˲(.3_NE!2N5:4sݎՂ:f29%t1߹fsJ&gh}x'U&M4tɶ LJ oY,V`!JuJr"M=^ |1݉o_2f H%X$~ީWc2KD~%# D=LTE3?ކ*XS\uv ol0zSA?FO02D+^_KV?^rdAL/J}^xU$zM5XZ- y/W疖CrnEiOCaC5 קY)G R[DP53M.Nua܌ gA1ڜkP %.k%,uzc>xa=wx,] O59ׯ\j?_}f{=G2cĞ kBO9jB *Y00K'ީrK6#eP7K 7ԥ^ujUec4ڮ3-j<^w)_""Orb,!~o&G^?- O'!ǼϿd bR`ǠdPq@N?VO,U8CA5yU:ٓ..ԜUN6g5m(Tsu_ %&[Jl HJ|B/?2d&wL2=sE[ZM%SaГ=6ijY%WZWxj5>0mZKD[5;M%S>=>ߡpk|+|}}lZ ԟ!p+(1A-:ZYvbڊ;m9Bw]W DT/R+=O$Y(AL43R $$eBΠӠ +EijR3C)֣{jѻS  Ɛ54Kzhw0^T͵'&<ѻtce.ךu mC"U2ن3;OA{;^; ?O=] )U~c %D\ j+]ڔq vB\ϸG d.{7\_+rȏ'SᣠHxQ 18Y8!v/> ^E&//L]H1a͠q|,:vWxnvжsrg~.Zld~K3w,#6D60}yȻ4* '-/i>@Ry`ĐTTޝRBg!"!ǣ+DaNWaZ1+J9,Nx IcWL(HChfqRZJDmSYKw82]ݝWS;zeY@k(gatOC@23"_ Ea@^ƫ_M!SiqHqZҗ s,00 V wnˢ[cR}Q}hPq{F,<)ԛ"AS@+UR7#RsW76"" mG)s&ySda@ U z#`V =p{Ea;zA@w0vqu"Q**:|'^wd?xo]^ ¡dHa#u''~6mR y]PO"n#f<a d |j#p(?!OorRgkVX^O1IV+y$aV%+ #hhe }[YJG}d,)&1_Sc^ 8>μ Ylsţ Sl:c<^>{zD慩w{j+]o涞= G&=ײxPQVMqG9l/3]g qx7 hn8`>q(IW v `_ǫ0h 9slK{b3 6~0F5,̞j_J5yΒⴐjW ,C5@!ǘ褄peΖv6u(t;pjcTlf >SɂJ}8B޴i,p~bkx|-=PJ.[a_Ut2MN' :pnjh82K+ogLOs:NZv.O8.S$\Y83j${KTRBb,6dJ_٦5buiZ&C}ݎN^fB$n$S @6bxv^ƙx器|2ׇXߨ׋"l׼z10|1wmʯ/Q^+eG|pٞs)O%\' %_&cnDž7pvI%x*|,7߾PЁ) L&oU:SUO fO=yp2v}L޶=9X."+=˷C)KIXGUa<(-jZ'oEӄ/|_[Lw$3Dd^0/2J>uENc%wP#/B"P @BN 4(-E~ERb'%KٛHF6 ذ-AO ]XIّTk- *s)f\ȳ@ܦ鿾&-FA[wF3,Mks?&v JVNξOәeJŠ| FF=7oOxԫ^n6F8 ቦ]z#8)d6À z=VO BhsZlE~M>> i,OS^ oƾ2}GHԇ}ZHEӫN2[Sp),%Β\ >1wjOX8&Q&OVMS!SUp 
CԤaۜp؈恁ep>3>rmNf|glplk90Kqס P W:3v{{yofowJ03;WB%Rtļևmw Cv#ÎɺU~Ԭ(|S0@S7gӪV¢Iw*4ܴG>Ʒ"I0 ?{+nx'?9^윾;'#<'X?d9 FRz!gcBlew-2Cfdḳ[jՑ4RYކ8!M=mNx3 hׂEn '1`NvG{XZ{d3.6:֙n9~}j !9}يwαGkhQ+x&Q\jG;  Uz50AXE0moP@m"xO1fl[B5"݂=Yp%9o8%X qTժd9t͑(C|G-,)? 8ؠ^2OvR5F@2O(XYmF7 F2>;K%&KU̲e0L;b4dNP?ı$&ٴ *6ZTgxnCc{/JnAX3_p_IҞWv1 DwO kYP0Y[$cgA"U\냈spt2 1U\3`BF+' [rtD)`´Y ",Gb\#8N՝ ,6Ǣ8wia2x+ v'`mD9U&Dkɧl& gAX`rda8z%)}w{9.BIE̿*") UnSI8`;VE?9kf׍۬)VBW`h(=O]m c̡ nƧ2`&ֈNSڷϮW]|ot]w{F sT{'}^8\Ę2Pkld[ͯ.:#,k=M nwEf̃#/)9Xs;~YQLm`+0&1Qe7zeNvpw&+GHZwRBJgR*#Аe,nʟYoQJ_ZSFc^e|Aڽa+@NFdw~U䎹wdym&ɵ dF^z6qvk`f&(?BU)<=u 2Ik$xC[v<9:8fV &Dbpl`Ӗeg"L>k*z8-7iA6&@TpԞN0jVZ +-:{>}&FMlJV}>u,ⵊ=|U$D0x:X ŢYMgmkW@/(-6j0>z^p_SB{Sihᢽ.stho<5a\Nj7yq]uagHd;Jj)jhBuIKhZ֭ÏBY*cHLsRZ}톖3&=ʹK)cGPW/j"Yz.7[Ъx=g-cebmYӧ>8i)Q[d6?IѢDz D]XCqU]y)rR6(j/ (eT1_U,Ly H&l/M{]PoN.;fd 79KJzY„_ѐYvK)n[kɗμe sɎw 9̳VI/=R  tk$I?`~R x3/^YN [7]u ѥ1 gvBP[5 <9d$lzU=#xTE2!#@oIQI?p:r n%t ^qF@H ҄$YWM1^Nuϟ.+?'g\l 䝬ttض|<"˧ڗ[8Թ72yƻMQƴ$rɱ.CGq25GzVcCέ CBE+2I^jvn⺕\{o#l:Ʋ#ۢ5Ft'KMeFgNQ>kwӂNu:ГEgkL. 
f0ky%_aќd)fF8,&Oo_6]\6̣"FR|X|6_*Tw򑻨d\McΫrQU#*dS;2s<5Tλ!4By; AŚNv.vV)7pc `ɇF!&PJb_7>Cd˧+C+PYf~=!ߪ,cXEk?p3z"'}"}F>V[#A^,5ՁGsZ0:jzz셚nS0ۣg /q&l%4r{;m~zg8/KQ܆cEpiLZW:e#&fOikסڄ;}dtM OeU+ +>z+;hY=\-p<]\2ۯiA[Wj1A\Yb4.y1'p.*u=Vmx?vy0qbi BejJsI`.ׂK4Y}:P@GeЭ.sѵ5ʣ9WUܱ4ް2{e?OeͲf^L6A5T믎 ߱w/`{,8q&@7d& #,oU\)r#kYtRV)!>fib>ހ.lAF֫իjO%/÷)bDBRӽ?K'Ɨ*X_Iuᥛ]Fxܣ!]QR[g|;8!%\bN@?{R-pӏwt$dW|'0tHi cLmuD4ae;?VY9U :Xzx(OGN\^#ׂEK+ꝴjpoK >t4p 2M;Ky rmƋC\;&[ٔ9+E2&KyjiuJYE]gJw^R>{EUe.3Nai\] ߔip풲+IH!L{~4`pT0+#>'6e떨n<$Ω^U&!.4e(h;{Zb'FΟ6l3 ә[gO2I.Z E׆y=N'/{-m|z\aSvw[}VI$ f@f%ͣpͧUػLgr-5FsU 7'8=x*N yicwL}ڻȌ ?,ݾ1g.7;z(zMhR{Roۢ"eZbQ4 <Ң}rW"QtSYBB&-C'as.uǧ3o#Cr gk 8߰,kۻx]u7 x zZ5:?"at&~6[צ'Jv&ԅ3\[cl⭪(Q[5ݏսECp<#q- 8;!n/HViLWN?ە{>rNނ{xQ ~zk/NBK&e߫rheGѡAo˛/LO\/(STf b w@8TpD8#^%!a[ N -*^n]|+eX8cVUOO5n$̊FeK*ܢt^Z GLVa|>AЧQtDƳXBC΃vx,IV.BwueZ!Y]0\~킯ಢ8ޱޡ6ƪ=<&OKq.Ojݗ*.?yLssҥ/QwɏQjX?q隫PTkv;OH=w82[)L%90'-`VAȫܨ0(Y*4f go=앢+MFH!7?.^\su͙S:6U**b2Yz\Yz~ԟ1lysK.'g4'NÕZm0{<S%w`Y&vdef-*4'(ߪGF+aC3Ӫu(YIU3Yrq2+Oz w A0x> 5iNNk}wmgz={4ӌ#31~kEj{zJ}@Wg%_ <'(R`a,أ/V5שRg%q7,4b_YE۽`b%9xpDʭX@-c|ՁҰox60viSTbݿ)jخ<-\u'a%zwGod`7]J.++E=+hW yFq唙[VE4~=eF ~c'0#=4+Z e.?w`ddSmһȠhFO=9t+V/2h1UCb;lЮI+`='W?7՚Y6ͯ ~n) Q9>|*z;ҜWgJZlZA-ĖI¡*EFԅjn~=(S0XpV] wc뾶s==^drE7ߚ[o(A5c x@OvcOan ;L?Mj+[+<[Nt:wPb]}7׏eиQo*"Jy;:ԫVŊhlgFV(\ IۧwLlL+#cZ!#CPґRB#GjU(ɮ})nn ʴ*>-!>˳fu.P뱭CCޢueyH&rfB{}xj +;,hQA P;He Q5WDo*a3/ӍW d%9nPS)߾(k 04e>.fد9w?>l`be g`t|O0 D7wU1pvOf[%gנW=š2x`'*=Sk*1RGil -}lx?ӍviAQ^Y#`~حќr)x''W)k$*olNz\ $ˌ;~`.HmUg\m%!ۗOOg+x6w1{5kctɓY-z)fC|30Vc.wMoW:MWJXN7a 87dKXv Ysm$ B/c)WAǕyy|$gկg323!F1ZKۨgA_?]ei=S'#u1Mէ Zp[,Z_4(9hTm K0saY.Je,mE z-w߁j@6XкH_G?̄.s4xJs`Rw#r.(I."Py E2Deyk2UJk[W#Aco&Ԍ&K *} UB{[啮n!eoR3BivUAh`#'Lw Rƒ=بm߇)7[d~+Z6٩/Zs[uKeZ Gʭ0z27D.ԗ L@N!j%˄GbZ/Y(o}ax֠"3 2 ! 
H~&`R1 -J$W+k&LqTmCd"_pr_ 7z٪8 [t=}dWP;N[-pΰ;=,zW,~%q %d~WeS/{nz1|솩Kd dx?= L=.:J[1V%zBI㧼sjKO?7N:E >m̊xf_yS{ek:s>^lo3y80{[ u,ZyM!oZW>hVh˭*L-F[W}ܡ\8j,ip@R4j-vViM2+hU B7MUf1yV 21Y3`/gR~To۴O?Ԛ` |WJ yu3XǢyȎQD[V}>BGUY쎇eR`ER2P1 mёs(dL]59#2NfQ #EB:683 x"ͦvIK5,_gL-~' V1³Si;_f7G]Xt) ⼚Wxِ:5@fEq_֖(S0@#UCmq=njfe?'Np DSpqBS͟+lYL5GS~hn唶_hg|Yk9<,; ƵK|E}a ^sGՕ`yTV.3vLKTj366xl Py1SI{`7;K_`%=cG4({feeV690 slXeL;;hZdHom+ M3pTʚV&JY "n)vUo&/$V`R_r k[+'n3y>: c#@38]!8=OtgKU fߖ|,̭Uhx:xh )d/~k fw=5_1)>0Ȩ)etu2\gk4|tsBf>Hz qy `о?dK4)i nWe$`NchT;:9!7*\w҉?=6" _4?ơÞQF"_K\@ȥ|Lg΃:l~bǁ;`/{JDY$uz,\ZK/jf,o ƖTo\u. P-]\q#U" N9["JQZ2waQhWLZ,ɂ͏bsfad2XܞN;.xF7 !$P/] yJ΢>O>uMj=jy9̰H8 !`OtAsVpJ K2Q3Y&k7U9w҆j+,}j 96ˆ.;eЭʃqE(8,N~ӔOmߍOYYZo8Xm|EeL1x MiOHMO/j!%~,}kY[&(DN]ـF3>Â(`J*{.ESm-y{{iU[}g!2Ng?eR7o'|}Tu n<6,w{w;B#080Wɯ÷mԮw/na$&Z1kp@̖r\Qwmf) ;TuݛIt~ Uk5kԏf|? 2{dвk1)D!?sWleumQϔ{Yht;T.x} ^)d 3*j>O+ֶקL8B&k~H$B([\\Ҭ T%TB)ciCm7ki79QR=~n;!L/6y:D8ZBȡэ:I76*rc{Ƨ=2 vV9=hZ}qv\-Y_B5vU>VJF_Vt}pH@1Bywpb $Z:D1F 9 6O +FO̶l z˳L%mmZugLjjղ3 ̵WtkdͅѣStf Dm-D)ja4t}R· EU,#:\B%BKѬlDZ(?N~ip/oY]䝨5l.o6Q/ڽ*NOiuhHpȕVYPirpQ!~mww-Pc2/җ- s{+ab kD,2X% A!uVEyEP$\$J婎1O~ҵ? » q@Y)gs;O]}qh䶳Nx H1;@ s hZQφ Oʶ0ڭt|_Sݹ7w3Qu8`V$ E6Ϋpx5r.ݝF톞e}<,c 0TvgIjz.[(WbUw*e&p8a@d㬉2:5~1(fCDۙENBhb ؃Qk',(ABXVfNs}K(8QnUP`DزV#)+pakٷ/L7Z)FI`l̞(c#M]zA5@nj),QZFD~(S噪(`/gy 0? 
.ӆ@3ޏ?F60`ЌAݿ%k'>}{'֡ -;tuCeU!^~ZŞdSZ?B hӖJ\N2 v^60@ S;$% j u/;vtƢa -b/u44@L +*Qʛژ* dByyiClƢ!Ꙁ5D^Tvb<^%J/+oFTAÉ<4Q3s>㐣lq9:˱("zl@Tց+VTC0XC,7*u!]3 \)/"̰-)Yf^ Zcá  K+f}1E.HY>!Tw%S#үk:\1EtF)?7C/!9\;nb c/QY}%cl `%r`\ŵYe֠ETJmxAeFޙad52 ЎȲRzm pn {el]48׎-w ٬5jHZA!̙.;Èi}cvs_I1w4bZ3JtLUlL w=NGO~w߄^ӟrbl=} cX~4~ MCd D KzʃʷJH { cjsΐY ])?yz\˻7+0_3;Y@ RFd*LIofRmcLz$rĔٙ_= X@ƾfu' B՛eyqpg#Ѷ"R|AD9 !c32|ɑVRh0rc$u)tEQ.ywqiAO,ᶧ,n)%cNtr(U>uj~ 4G_Xf], ϩg2I*~ciRBv?hAe-:}!k3^3st de[@Y[&[!SUogE*#b'x75-uvAݙ/W5R'է?TNCׂ9%إ2h6 xĔ|dC,EněpU>˜޳$=${VeL+և~)Gjɕ~~փ|g%A+*4vp# :* '\ ͒B}%G^Y' +4q5yelC +!qrN{lwgfz9ʬ!K8)wgp65Y7t ,#'֜u|"> "ܞCulN㝩YkoʯW_y!O8` 4̱bQ 3TnKZAmQR ~sJa[ء6,GnP*Pʞ|;Ik N'F0&"Qi-uFjI]2 ʊݫulW_L:$* +\'%'vYrNE2Y7G`DueI.uf㲔˧~=w*`Q/#ߵXS T^AO BèG*U ȨzDW.oK@2F$۝gNJs%N¡Oܜ܎Mj[E Mkx֣m0j9 ?[iBОt݃./)V8;2Ya^pAb$|u ՉZ@+z/T=飂qZu:GUWʫjc 5rHQ=|C]U+!)0s͕6^!{[aY̚O&ѭ5rsQfߧhw #y-W@ʊ(d1)LFO=Ԙ<ʌ"\;8a>Q:Sx[n\tQcc:Ƭ]5C#/:+$Y֙YtbvD+4,GrX5ynoa1ڻޣgOV^qd9~y}V!$$Ġ/r- {E 2*jFAct^>#c-ep n@F1BAf,,1H2} ;utX*je=eו5){z]&kTP(j(.9;>.yk6 rN>oW;.}D\iջs?a. QuBkQSKUWp?ZR s "i/R.90%F7hҸe^FuuL{Ϝ r*gfÙge]UyZ4vQHEfcU̮_1.4!@[Ly`odM"Wf}@Vq뗿$Gс `P;낒.Eb 3`.f@int?}_x3|2އx~|UmtE>>e#n]|]!g֗6=F*˷=tvQn|Y-ǻw(K~i<\y=b_cV_j iF#gO-rn\$وNGKH-wӰ.VM )y?;1z!|E{ޞC}"xF&3h\$a[wgl[]V~E+vB|ŪxݥX׏?¸Z6Փ~=Hr_CvNPT\əխŜ;E8-֣dćiE`=`  sy5IpIᰂHM&>?*\~Tm jTO!&ey1&bRcKZ_ OE| kɩkq N">xqF'}G%@uHp)T}[w:-Y6T3(h|uOaҕJ|f #Na,<jr<ڡ/ކc9UM3[и)`q ۺf1HĨmMlrmBp[睓B_,`F(fXQTTq@lRr/-JE\cMn~ &M -(q+V;Qs^sJWhTR.FC* 28Lzh4IAi!fq]$l65kӘ}?+0PABC]NېU;jNBڻ/^pxX݅~n.@ĖALF N{ymf߹pj{S 4 C,}gk'O=z8~e^oQ3ZVGñZL޾-fOi,YfntWxˆn~zRQNT[oYl&վ+achsO]^;NsUNXw1*a;I3VuK'k-oF ߷\׊Nz.1Υqol}T|p}?Kx*BPF 0kJȕ=ۗ Oy<6^}%{Uӷ6,HOu LfF9 R'l2$xԣ .uN,Smv_N^uv-МMzY³ ݗ ܎M>¦]ܛyl'Bz|kMa-uEzleNN;Z&ApT2)3t+KqS#2*g"֍}Ǧ{wKBwZVxX{6ynzN{3j൝ pPJhMk{̨ '-X`.%rba^_-j*'\lh=3.F4tt@8𛴅鈞 AA-]\HX[78)c\Uܔ-ڐ$Oܚ@29D@0~=[^`{(5:xAİ{r E9 &F 'IwOįN , ā%C#dpOkJe cA1Z cD]j WB ܈j ge)uC!ʩϐ (Fe׵j1^CD9g? 
Ԯ`DpCCy?mڨrB,] L u67>QcA8zin La#Os=IgFE'čR҃Z/F)l~6ʎ2&^@@0f" 7(3Wݏ2*!o{}2Wƒ`=6>֙.XhI5<ͬXz'flIOJ_)„?glmnet/data/CoxExample.rda0000644000176200001440000072651314046050560015224 0ustar liggesusersBZh91AY&SYBk_P@J⽞l{z7=׽몼KR{޽'WGwޮm{wuޥݵҥoN콚c;oyn ZܖλzZ6Okzo^f*롺z-w[kJ޷w׮U.wJ]ҭͶ^hݺzv헼;[.:tsowOw.{kjQ[yo{]Jnsm{.ۻ[(j.F39W[iNL]Owmzo:nvΝ۔`>)zoaq5v}w|zz ݀6}4>K4 >}(:}4=}(z@4}@l}=@@ 4zht@4P ( @݀Ra4fBO*!T@ S'*D*@h 2h*~LOSa2` 4ª!T M#F&L 01O)#0T:h#L&&M2id&L#&&i4&&&`#L5<4iLLM20 ʄSD C@hh D00HDIp 0ˠDdD?&00aY0]&a*4"a` S]L0a@`pQM6XyyK"3RC}& ՋVtMyeHfb6D"vFoܽ}Ȉh7 R~(,"c'Fiꞕ~TO> 79Z ;N{貴J6Y:"mC2?Iٸ/ѻ:&1"9ۊ-@u 0;\;7 \?]_*Uߎh)J1Zu?K}̳2qk'fs#+)Ѹ{2wa~tnq4z._Jr)U~yfYslX(LOnl8W6r_S|E]8{@b_x)j-y|R $Ip:kG_Ҽ%ŶZM)3f'K.ڝ ֿ<ٜGOC v,K1IVMz;[Q̫\ UsKݰ T% ."= P>+5ŵ U~0h_{Wܛm4+mAy:#2~AYWϓv9G,Ӟ U6 sSX1unOYNLpe /:6ɔX3xCnϧ!If-&079MjnYz|uW#Vm򮊗Ʀ~ג#@8h48ڿ0A)/Vyl?GMϒ/\꾰S6{1]*3QiIݲVwFaKGI"omm&MUVM:0~σ!Vm,9g'/}oc["9mwCzl];֖M7{i9UϚ댮 Pzo:m+xDPwWS>B~$3vûOO̓9@(/}dFJJT>y|% S4# L쵺w&\s #(bOp$goWGN$CNW0u^G`_ gR(yj)HGYp*_0ux3]Ţ{-܏{u͚[ԭ߄P4uvIND{J1/"}P9/B5M\;W[& N^*_4)z{hn!4So\44JU[g"ƒ=OG^rmSt[h㫅0&s4ܛk %nMj+>J)/!9R-\8ڠV6!߬)} S(eɰ2}{a5CQdglfӶO:to|ʺGz.ͼQ;)Sѱ?_E?7$ /̵$xvГp#قe+f!x6bkۼcPb| ln4]J?KtyRZo&a3}Fk>_V/ wFjghLD6S[1j:q[REF}SHѳ;RE$t 'R㑸hIg3B'Rj է,0} euE@x&DA&_g[G68ӮQל 3fT:Cc>gl۠"cmn].A g (,V4 5rޏOZ떏'Χe]vgR^mG;5M.λ̺Y.UW6T,:qUlݚlKO^? ACڤ$$Х0u~kZQ[7hLJK»3[ԥ]6dvus.MA8]$9p7T?rm m,.~` MX z?Mz\+_ /8dQhYu=u2'.et0O;zprˍ@Jx^H^ :+ tKRM M .2sPcVTYi'4n+|lO "eW8SW@XvoDKh:+ xabmQdz]Sۼ! Ъum긝* -5's7&ֳT_^paZf5#Czޤ=;ecG]%Hϥ|T]Lu36M~M)Vʑ m^u Mi#BMj| )¨شnhahF#Q5T'^6Y|(GU:N'؎!J)<S*P؉sV$ť=(neMo0)gº敼HzdS=Ƶ[Q bс57 g_z ns-Vb}u@U1 ~9BKIC"QYzik-lq Hz#3Ƙbdf BxJj F#Ao{} Y%[x0`U17`*pV?:t)zID(n| Gz'Zѷ  eaDrɳHD;Ky'"8]EBR}U5/pR[r: +>U~'F\'=$6 O[}*VAL!cGPKr ̈hQ3^d8ýU^|v 9 (ՒQNIL{(aP_ x㳤JeaٚB9ֵ,ί%C d?,vnhŤg? ®~  ]hyd? 
6ޞ {IWҧh6wa12O|ZS%G}Ir"Ob՛tLA8`žƄ {IA~ MG lo(ﵾFd#xnٲ(_JKxg19R$*`%!ƬZ6p pn*>K=|]-XFrwpν|BqW3$5O+JS!{,6UeĹ ɵ9|x=XM3-ɍ?{my1:nnofM.؆1Z@ndCO5D-/^&Pd>K [j[g99Sixzy ڠ#tS}B kωØ9Btx=;"jjB*xi5P]k YvR X 6\Aa|VM*]+d)Ag{y>mEnC6d[/"]xat{y= @}(c% ?aGű 4Cl4{<8(pK+(:Αn^*f$~|廎>2J{NсLGq CQOfOZ u=jegnQ7_< ;2li2O{  k3 1#2HEe1|BxH@҇fe>#h[#DWz(~P`ΑQS~8{n+V)C%|^&D];§507!j+a!Ap T,`x"VdɌ_cΫ250(HCßH9`YA1:9ክJ=apkPEF%+qskT+ pYSgO:KqŨBPN>.=!P"WTJܟnn!C^g48|e*=ćA9z"H`1~o.j$yYCV~t;X)C[o6^pUr#wA(!9#18Bk<:;)(Lbns2ߴT [39J~k/)le&VEo[Bϭ܎Ut5Mb%P{o0Xwmv}/O+P2byxPe&~8r,HC]5CCud?2:a mGېːZB?O*{thf m%wd wO| Ą= fS{l2dR,u|g91s'o]ot`fiio+Sl98OlIQN)b06N4Hm5.waDHjw\lk|U\bٔelpQuA \* ױnOT[s;5\3 jM/ڣm&')ZzNF%W9C4v!9k158b Cjhڻ$prZMS#&d+PF&-mp1}&Qie'^ r{ص ųkʡ/ 2CXwWsÉ.~7EtN_Y_3=s"wðG/@YS>ϠRAdҰUEZqa, J y 3xd;]E/u 04|HYO>[qQjDYn>򋘐F@x2 a04Z[bUGۮNfDxv 2L&w{̬1tVyJj&:L*/) :3D# ˦Zf]ڽKo8§0A6dh}]bc1`[z]k"]q.=>|PD3-)?}gC0OhaZ)h2GAh-InƐq$`c"أ\NmBn^O. sݶWu.<D(Bv$U[-`ͼ1_HGmם4 ;A|ڹs@xۺ*/DX.=A:#]!Vf?1O+4|0 ͡pE86`Jĕx`< pWnl+XnkJ~@pN :u#?yYm+C|H`c[:_V๘t~HڿHf엪d'>׈YD(a;,\BV5qP3|J7d) Zr3͚ՕL6Xbz4/j`<{CfG񘧑($q`acSCٻ X'Sg]J(@@}z ^\yNW%>Nzv,߸1ɛ"|}U祸]-<*_R_>&+ QrSR=4"[̮tvH0Fb-h7q.%8+$CnhAn^NG4fO̘ @5[NUFRYhdY'yZֈyuyp*~`*M$QxD1fw>DAobeqLwEI"፺w ,@j^);Yd{gTY"7Ki6FH^g)-M2&!*Yzܼ ]#6-Cxd+ʺ~as5o|ZJ #:P#=[a ab3r 16 XHi#"&~CrR}j@͑ᛉh X{ɴwpuK'IXyx}ƶ쇹h%xu `@~]xD=3$zrPLf14S@/0,4 Ak,f/Q9VryhU{5IãbbS'O*e=i!}%E"Ġt'WS<$byi^Hݯ ] Ir~pTi>iNSKlKU02%gMu{FT +ѭg~ym v)m¸/)(ʔk(D_ tŬ.s$E|uy@=5,)\MnyA6 _Jo yr+6fˑEY*t󷂔f0qGߗ?)WN .O?x4}cg=p(I TvQZL=5&V+d{.-r|v.Dlԫ·TH%~Y Gp*;r;將|~\j'39;L\ޤZш -(yZ`T)A ePoK1!vfmA׌7|"-^o\U7UHٰQےT\g%u:h`l)X-JKe[5Vq(6ϗD%$inD &v6'=@n5jw[A~S.uBzf .AlΐH@ vwdo#7ÈLnOö (gY0NחG2S17; 0QVbܷwr+Que&6&dghrMzk;{n aVX3q]nU g[ $삵lΉ>@x7}_8$&;c[1=HfQdU2C̶~N /\t-~ӋVv`*\k @3tʆ٪y;ͮehj e#f> .`+xV`Qxՠ{ٰ۶I=& Tu9hɱ'&q~Y ʀZح5i۞6?'D|F( :e~Tel\%5?G8KtH!̊ [<إRlOv|)3 S9Qf9Ř2ܿkT1QCEAegZ&p*:4(vtz6_v/xyvW"ٍ g/hdϊWxKZd!K,ǼSm ʫc7sO  F,^o]麀h.1#LOHE_yatb߆%#vaH5-'ez9ddbDRT +so|RK^h=<XĴtw"sixV  q8 yUN'!~ż1*9%MoG! 
ܒ"}ih{7KL 1veh*s, # ܿoS&c^O{b+_t.0 `r{FK*8R۬;iMY3h))ݽޗQ9j_ QɁ^f= 71$I%Q0Jm@2zlh~8fGh W?G{-N`s!zO'b% mVtDѫ=Yy==oyS*UkPCǫ0qfjvgu4ƅcѱ؎Gojg: ,p5Gloe~ |}$3~eq'>ϭR#Q)v8i*2p9\f.c2J\#S ,pɕPom#SD7;ŶC[)"2am} ;.X;lRi ZfBuH:};-JT6Cߌ7~tWD:}nvQᓗ֩WoD OS[5Txd٦S {r\/ɞkz",Xtqs*)w}~cp_OxGTݰ=<$zSN>~,wKkv_ r.fKovc=F_ L TNRMѵ?Q:T;ѩ˃KGnv8uSDWL1E|Jėյu͸h 1!*>y2^sUgk$~3Sim$@:l֊Ld ՇRIS~أzYniy>Ekqr=d \.Ut AF(ֿ',Kf_tn{E䙝w/{e .(IR]SwDM.-gE+(#Q !?;oz%G?UI6 1|_UXy'R$Cr.rH\@Qǒw&O'nJOˠYk*v3`O:O1pc1"ܾ\Tz"4fn0&\?Ns;LOq./O'>dOx*ͦ#vQ[Mo?+Zoi7|R,?E3pTN.}C~}xPaӗcRhM=FEntn $(E2ƊGasXU}dk, |,jITP1e?"f"YuA*'uF`2!'x۸i|L̍|?в~'4e)j/sL<Һ:MxHH4!WVr ͑Jmu\ ]H" d MC/o B+\嫝 {βXW jY$ӷ Zqې"sO>%z ZWH:3/q T3-\Jq$u)o b t9o  Οו>- |@Dxfު{cpA2"M3`1;:DWxP5YFsD&[ko{P9$ѪjK >5Ű*߁#}WiUPHwl\&Vv6%m*QMhCfD?,[qr;.! E'/ =ʦ8P: 0dtL#1I+/;kȑy4ֲ߉#*f[;<Jrx"l/O5T{]+"vYYLc [U$}s=չ{6 IUd* 1fL`=9R ~{o0#TH%zdD`N yv+4MMnLJ|yL$!{SJT(~(g QU<"E{AB#zSFg %MZtCϋқ\a+EkҁI/#fI.MPpIN -ڡޑUpJr (jjǹPOgS-yb^1U"{kJ~ج_d˜*HTӑN8^Nr0Ρݻo-e(Rڔŧav 6\VȍetA<;_}dmt]FݷX7eHD?hV7"MxF JG0G J?@or􅃷Ij9iCJǂ?r:hɈz%h<qsd}g'k ?:.`Sl&a uHdgnt[tJCn]wG8,O(h*V.y1USOʁ<%U[3rɌDD꽂J}bΔZeV(I#\+n=q YҨ#G9UCzڱbvL$66-sZJl0 YՀMSr] [•1yuo9Mfc5[y@, MErsC~|%&ku!yw\=Fb些M#f1 zj> ݈!R01l|?oX'J!~R靸D+\ؿHObT;hB̀E3lz0Gk`  zz߯OL)Sʈ1Ih|k.{Ń 7k5ܒA@2?omiݕF?3G-z6(n6zruPEK51`:  V|/8U쨬KR*1-6T,$ʏj ^A"|pG}0vٜaS`BV9Yz ya嚑NC'L~= ɬ)]LehSXɆ?jmH_`vJ}aۙ? 
"ec^IHՂS <\T-"}ˆ]fegKyR-B`3X:YgJKw?q}v6 W=.d2WqR޵qS3Š"%=+SdZ7*Zᢅx>)~奁؇(K%zL?MDFB+;͛Mc-&EN0[xn`?JX7~h1W2B, 'IYóO~ƈ$wU$5J$/ƦarL7gG̀Sܿȋi0|^fy* -'Qh eClJQi]JFixrM1 _qHP'Ǡbbo+bix$ob8(*4yK*JnjTR^ûjn:<ƇFR!x% -3l=X{Դ4}@:0 a6 L ;..POH(0@ ׺5}lʐNh?)֝>:+nc{\i4dj^#"Q}]d*SQӬ,T}qHAnD>OoS$$&@AV>J'jS t//jXm#llw,R7\훽o#`d]$bzl)ϛVKubO4bϣ3Y*#"\_oioWwx-n duթM غVp;VHÃbai)}&Vyn(7Gw߆\[f*(QyٶH.ʟqJ9 &,7L,䖂[y2JFFyyqУ0LtMrfwW팙Z)sJ=e"ճă=Tڜp/E}=PI2 [A%TӰUJ}# 'gN\ԛ~sz)E`hHSV5^sQJ9Gȓ^{wL V$eڃ-ɟWo%!ǍA#t"Zi BJqчF+ .XL.LW ӎ˿Ȁˍ-|aw2z7LfE~A^FvcxsR@-0D* Po Xc<ݷl(-h0m ~0dR8YU`۠f_r1䓱U~Ԝ*7 x'(",_Af[!/DE2J'ݿdͬŠ Yo C}MϳWX$>Dm pdě/vCoz _sISJn>uYr;]`!1$>13fv:[}>/XhIJer4FSdv,Ri ElUƓs}" b&]{t|ϖ8%Sx9pQ苕Gaϒ]ƈ]IU1Ze!s;P7ٯPͱ f,{דU)7dpdFF@&TŚ%מ=JJ^Z\`pì@Hʙ60s4(tH?:b 굆d"_VvqMdRd^L>ZɎ+oU,%:!]eLI-z颕P$ݐZCPnpZf],sQ2ƱL\)RcՌp [ɾB`B"D{Pih5- ;M? )sV ,l ^.!:Z q2؞R2-maQ 6/yblXȭUzJIQ "Vg-ah_6'c"^Ҙ=Y&FM4?4*Ǫ <Ōk('F};p"ԚOy'%&Wm'([ ˜&_;G4?{MHZ󙔈4-p<,d3:*}OHj|֧3Z;$دWC,^K 5˟`HyvʉeUl6;s`9][JXn i+-Ls|H2Og3{Ȧw hmwROr63bn)4%L- ^'YbaioSAپry@+2ĩ`[kįTJurnk{4ȧ. t\ z >7"?ij9>C&q^^<\_'یyS_021b#(.ܾs ?!X|]YlaJ| ^vxJ͇7}J͝SY2|,w}gZ`/Eh<\m4tɔ%snYE艭e 羹# 1yVZ=UHUBGVz1 4 ؾb CbGGIhxFX\l3žG>#=W~5Ȩ%rHnS &O҆)qs@ .>j'D%fAdK {-yAQvy )!)Mba=U, ЂRZv_߅DT ej#IKllx4 dfJ]8D wNf=\;8jÜh`؁_"VYՠW\RG7 m2q64F@\I6ZsL2Mz|DG.ūzcDO TN샳u2fRJ  *(^C @o!ʀ!&'=?s_PFm+DNB,I_$D Qi/rЩum>qIT,&p2!T% |Uc=ؾ^)Qi!L%BMlԋz<&rOzlJzsZn!0|AX{ O0me}71+P_LiW>bn)&9'<37Ak4#_7l |ظ="܂'oŸ:mj;yUU~c@pKDP(nblFϩ#d@6eVҊhTttjC}bW-R=4$51@Y i%xCx~jXKP5#)ft`[]lr޻Ҝ(Y)CHtm%)߰A>w41͑w~"֚n>|)caTxn禈$cly& Nx!"s+lvB cUi w8J[y+trRSv5zGKR3wBv^?AeW>`5sǾXjO:71gFSbsQIvPB3_V&w b3)z9*R};?)_2|ЂE t^/QzYB8eHOf LDѶ 7KoKS^շ G܂+\gq6&hs-3`mspT P[r7͉_+sQi͓ۗ&Lj 1(E.dY Ό'VA ,KY:? 
I(S)es*4ٳv)( ޤ7Wm*Gmt ~~2!,uj(Ua[5;:~_)֭x4YSUgi 'VwɌuG9*-[Zv%I\FFDhY?}ʤn#W< }5ߟtiU!&',!2fiMs^Xy Ȅiݾmsbfܵ.+)_EKo g%MR 3MTzTKY,G2^Q ǣD/1rP2'NbGK&eo?pq)r}/~e}Oеu'u7wU h* H}t9F^R.UrM)9rA]C6+#2ܒ6(3ϝ->3VV<@1bwdQ`8ݼSGoUgjq}5F7wYne=hg>:QvNn!} B9dqspI&C7?`8sԷܴ_0S#\6o\'uk;Xt1AqgKNE25h(Gl\vJ;n  2OGwSHcPp8jf)B]8+ 5t`r('jܚ|,tﴔƟczZrmB2J;Dє]؊ҫxk'C@rKC4 LK%LumѨ҆ۼP '&mz{1F(To[7+>.#g35wI;AY"oRE>K@dua2AܝyyL#B,P | 9kWNE9@dC,&*{?g&}ov!=Lj#J՜XbUb"5nuo y`n93(Aq( Pt:Njt-xwLȨe&}XLfsK's?+F ՛1x %.#&psC@ ({#3Qaen<1pd)*rxaUzq_]bsF__;="m2xAPy5b Q!ق|MV q)rL=W岃sH %ѦpL(*R]Ρ3fXc |><'sD^rò>iې"Ҕ&#fp O7GUW 0wWt/۵mJ߲CIOG hn=ƢὭ๚OdK8A)K};6Lڗ[ ) H"H8նXx)YA ٹ5NNaIb2`HU [?2ؤeqO =e*-M#4`[ϫ%V=+(ݫ[EyE$erקSބv\|TAK~1 ',q/i` =zyIt(gP/T;9akd 26Mq kK{PsӐ!;񆝓Iћtf= X:CMxd|{Zw!:q蛎MI%ى_Y'^e,v:Mu>FwY=n;<=pDMr_߿_[ yko}}4}|bO;Z?NKo}$Ќ㷕ɩƎ]'.:JX*\:$|<а İ=t+'9UˣUe^|cT@Οܾ䜾g%& zxk,ACN&"_z2E=I|@xjLR]3 Ά!N@⚢>XCc130,_uQ2M~MoИ2Iҗ~/'rzNw8bXi (Q`tcN> L~,?e_B&vI^Mpe?\j\}C? Zs& g+\vp:L02Y+'K"$OweF5:iAr)'R:P;L΢nL yxӖO, ~<鑔葈?7·&șdno[[pJ;ynai[R-Zq& gDEi HTo+P` w|HHب NebK!&vg蜂O39և F%KX]-'io0cPUNT{H/㵴+n<OcY$;,P"7d䟶IBƉ-2b}`jZ/МvNa+lF$ 6?k7V"YpNi69å<]~εO|qHp"B^5|y`O],gVM5?5M Ҧm1I(Y䠑1y  :B`9Dri$D Vopi~I,/ljFSLeJ#n:`y媛C"N`0$)74~o .: ds(Da ǟou`Z6fŃ~[߳h,J젿/9HO_䫷q`JZfU ΧݾTJPLJ#`}~T3  3IGHT``d^sI.J/f'Lޛ ɈwdԸ4שm+0W 3WWKo[?&::*=9IU7«X0}&H{N+Pb 6m]ke=i5jp8W@;aePZ?»0"zr7yacJB E*kuU>6VUsE+VשU,:Ve&=8*4BҀ2$9fDy uoka7~ɇDb*K/O).7HSYj!A]|G?i/~XP?+KuQz-K's{1Z(8_R$Y#PZ.7oתYNy.bq"t4 <&%"q?q`o.ZuJpj2QL欔,#)1Ѝj箑m;|J$ebsD7kI˴$ABq蔛*BBdFe+ ڼb:| G`R^ۈJmtp!!'g/տ)-GIB1w,!fC%0R|;%1-Ֆ \D)5Di>KMԝa-B;\72`I+EOV}DQ̲{ӂw9@62ܠ"QlQ8NDOhO~ؠPRwIlݎ@NӣGں*ω]s UN[|o)h O];u 1 2`oݷPܨs iڒ]q}+kKn/~0,ߺ{=/p {yeƠ`Vל_Z6rנ>xRBiC-$3G;uf#5т$iY;x~0]tp'EOHQ%p3ߦ\[\B8f0^Mݝ>/d%{oeEYӟ%C 9?h4qsEϣ)~wexN{]~KlX0Ph/: {Yb,~ RCSl+ h-8z ~<0<{^ ] AXw[bĜPϪ,k HHO2T2ꣳj}n~+׊r 2[Z͜=NGuZ (Itpro]#ksga%Unj@5_+jY̞X9`[;RbQw>;&jhr]~!?]}~2i}^~2B9z~O$>!Le;< . 
pș7UԋϻFgyQTEe}qS ;+;ҷ- qnpnBvM0b.9"rݳ.9x0b^ɔ?$Ҥo ]Oq؆[KDсCS{.$)%J p+ /Gx϶b/c=ZMk(g`6/pS'$⫊WUa8NVF0wHs_t^9-ނFelR`R+K-G@J~TCo(ivZ9Ua؃V&1Or2ǀksޜ82$b)V4f OtϭGNIbs{v̔J9‡{nw ] >Ay:1D[u.:'pecȥXpb3zN6q(Hת q/kI2ng0ѰHQB\"gm݉h 9%)zX~^HmRm'BxT͓ڬ*6!l=GU} HU=#_P[Fΰu( "ЬGq"jyFd"pv9q@ "~ ?G*o!3(θj>QKRм%>P 3S}=P7CӺXnu ~ ;:PM-/qCH.Y&0::ѠNܛ?EĦ.;@#CHn ȥe K@1rDs\PWܜ`4z QݘIKI Y %50aX_JS%hC_+;zac-;'/eKSF3%ݔ#nOwzؗzuL> c8 a5zU+TK^}X iSaBb#t *L51%zL$xQZPuHK.,' TAkBb>Ѻ5 Gҝ o򣐶5^daY/xVh.w}kWUrOɴ$j|u Crmi=/d1SB:.OmS+zԭv;$, ^:T4/+sjBkL6[ L( FVHŦ{z ICqY~Z@k/dDm&95UؼycT!F!D2ojhmP5IdXPmLӆ=X}Qg:нG%-BK:JL] tas5>HUxub''ģ p)5?A `YF5+^,r$Y/2CWU 9yu*7ZH𡖥}c5睹#Z:v;MϋA3o]6H~U땟?֟)E9ӥ&s)K[t&};ߥg{4]2|+;Ϊ-fz+Di+ d9C Wjr.R]f- j!J7Zv#2;53BU܎Px]O88 Hr0,cj-MҠr^pK] P8qrEL|m[iyHV:Hj~ pwC9|X.bBWCLG{Њ(ˉXdcB>zv;zQ 2JQ%Bo``8nZ5F y=?4ES[tn=5^Xިo]t0gHw58M0$Ri]gme"yfa9xqoBk/Prju}T^LLĂUёNYg|"`SٯP:vMVLgvҔ RQ_=bJ\p?eN\<8PI.͛`N_Ph3Ջ]l\1RTF zv22[ܖE)+?hӪBXKa;IFh̞Fuj <#r:W`qk³@P_DZր~JɈXI ,pΥ? ;G o9(Ei.p󄡳;<HDҟ&KH|U0W? P `Ğ$F֎^mn:N𢘨SYKt^CTVncYmVeAvpgMh}5NX?{ Gb˳Rb.U`mcXeMOEUC 'j})'wYTHO$E 9|DdCr^%⊛ ݞ' 1Kxa^gajr^IxF7 k\2y9gzJd3qƱyЧP4>ӭ3%ll'ң!px+p+|9v2K(tc+HZ87 d~U G}!~+U(i14Ezu#|o^)]rUtuUk4S`i3׿U`UUM='<%‡^>45}s;ؠ܂qkѹV2q, kkFe>$$LQ(U¬>94/Vf ߉GN¯\" | ņYz.Tdu|[ZROꖺ@OX6Dfl1Eixc9XTY̒46+QN$˒]+A67GeD[<*c|Ûj*{@26# hƜN~܄sR[=Bb<)q($d87jY\_ݹPtHԬy" VBS'rf zKߠ1pmE[(Em?L@7)y(`Wh>5b>|/7 hV(-ΠH&4'k"Fx^+f Cs KPfFҨ6l'8m"=:&A&@M 7||~]SQ@,M (H|z(69TF(MJYT-V?\,mwTYm\ BE[>ԛBXD FQ/z󇪛W6|n\+gW Eud7󃙪k.Q,p{%Q~fON <(Kebn*OX.E!4xDN3N9#pl)ExZ0U;7L~pä*Hy•)A uC32/"=v;ƕeJIjJzϘ-N0^%Y ?~jP{c8O%S6,Xq:eFWhi/fս^zr6s^g4%(wxZxiAv#k tFsl4I_o 7O_M~`Tu06mTщW>K=?<_;9}nS}LkƄ Cf7 +}wᮒ87!X)7~}<~_8:l20^t 8]UQ6ܓ?%]qocy=6bC rhqJrj] p?lQī,CWGZwbDd2FArk2N8eˏ4p%&_zi4ST/< R,S\:a-0n7!Ppco%.X1imGCVZVCCg:}usU@t,R [v]@Mg'vm? 
bc%N8>e3hI [=*K*quzH뷇輰Bm5MjpW={5))$U{F^/|77*e4rEm -1iسh胛?ʯ RPNOs%PF%{K<"|?*겕M kU](e;zi/hFnBj7dPR;=teL5/G230۹;D'4U-Q?]"K2Ps!ij <&;-h\H߆Rxn6U:9DZSdoz zQ|7&N{cZewj儽uT3_Pe4[HsJ0,ų6"ae~X›LMbOkG͙yatWjHx#EHt1ԉ].J;u=ZD@6KRL."p:\8<+-r[&Eߛ˒c+gnũ2QNDlڪrYBr7^Hաށ%,F(k]k炓`hUy)NNaa|НDDG2t|?>OBHeFJ?iQZ6d`= [ Gm9;d3|iU͒mR܉6Q9+гkYI~bԬឋ0G5xayxCL2\PH$J>xۊ+>e-,lڧF 5ofe)9|J̜f$DFseQN1*-L⾆.m#~蘒r6~$(}ek0[hhB%}VjGC~9صmO+C\PR[0đf3; Hfc!DXO!jGu"S<>@b1Aca\eYa=m۵t{VIe*LHufn=$z?8fVo\y- g_+&,l= `4+Tf{gB4sbMiner\բe&i8+liL_kFK$vY} ixZr=P Ci_8- '9B iƂV(a+ޜ _EOtS}֤wI~-7MS(;Fo\i;F? AMB I֕>0ݾ7Wt[>9kIf[,>0DϡrYCK%峠)O}k й8h4PaAKS&SAiN. rl4 Հ/1hFQpll[J8g2Wơ@/>(/qVqOwzU!‡K4[73 m~}Ev։jaoȾap7;j9~C;@^F'gS0Q`n>K_F,`MgzքtO kjC}^S~b=œH.L[|נ-es p*`錿#ʂnk v/yOكʢX:2M튦i I SK&2f7P ˔ڊ3>8ngs{oaoqmgƚ;qV˝yg 눹MyDīr =>v5OMI! oHeRc_sT ?zj^+'9]q4N?iiҶ]GpHgoơŋ%ׯD}qGdvd21"WNBYkomxbZ@ƱmqPY:E ]: qAzXbő׿th\y48yé%U0/ !Kh=Zj9D3ӽ~{$ỳ8i?zU3~XSXcG\W#{'U.uNzZN&Ǡ'&WD@#t׫:/+*4y?vq2yD#ovߵ<81 QM>kR2sƉ Fsu<6覃63FTT;yL1]5Yѹ0'}ښP1HO,Iv2۸ ~+iSY7Gx?,̈́)aKL^E D 1bX XLMm$iD[az-w}V$m3;P5Zޜc,'S囁3["rzWF|/͒b֗GZ5Ip%pe ZӜ9\ZG(c!d$UɠD:tMtcAk5a[JsUz'bLy̝羙j$iqDk2^ aʅDwup@LjaR`'eQ4QVz\`DL0'#nvnwPv9*97:u%$A/X>\iqv򄞫3ew45`} SӢ6bCq\5 aiѬj1$4zKqQivٓJw#==Tmې=4؇;RmvhߏTlHP@g8ˑG\ ;G 7$GwmkWD薧ND~mUG;2|tOz?<}dh9?3_yxNy&`ǵ`_ 3 qᇜ, ]\Ŕs%43FPTR%0H v,+L>_d!uVd&0%[w穐r꾲; d{}+* Rwr;cM@(hl*`9y,畺_o*EA%T B\vP •U6٘څE|5$t6l ryz=1jFpZ,VEfPߝhNF \V=W=1 (/Rt3/7'؛Fȭk,ϟ88%&;UC|#5i-pӓm_*Ȥ}u>ȹ](xE6 Ȧ"no9啪8<‚hr%܈iE% T",Xf]d`G5(CY$u)fgGļ[սX3]gA9)7mrٹ'ʵ^:u,%fGII/R"B)(/:aL!"/<5a@M)/1SnM.~NoI1<ܬQ)NU'A.~ozB5 WyEHڷ2vՙaR?P 3[A檲RZQܿ1šaFضᄒSP2++藞Z.ǝHO<<ml halb@|OUg;b>?&fW#\tױҹĤ[:P`UQ .3?GT̊86Eժy,4yK9=fJ 3L`qɮ7Z k&7NKtGӄ _2׶.IIz'I4k)< 9DžPu\9f'dV]\pZ_7>4-=R;|t}S[c1ےhr#β]SQaKoϳEn3]2ABrˣ OQ@҅t^<Y+Qr<:[:?."5G߽@aj6e+H (:ן&!{T6Ep">Mu ̪f5{QRƺ Dx[e'(kd@!O3W˸+A()^3Ԭ}ch7;?x*IA ud;UOk BT'fR Gx~lDf;ⲫ/f!EE+ŷrbT63sVF@QPV?uoϨcZPƭ`K^Bov'zR\uthq;e `Ux.F<zz'53JX5 ZˆԍDs- Q//h}| w}3|:h@Sa7:'wDK<E#j& wac6P&!{#) wĭ~@yjz\I$=3>]N-AJZǸ0.v[?Cݫ"^@׭ј#no gK a0Ie l]6oCnHo=6=AͬQqhIyM)5[tf)]* #6aɧZE] O\>āAn:g/пpxHJ<QqP xs 
+1`#3zgo%Pt1?0_<-G7KH/UyJd]IeD@Y*2/:6^p`L&!v}R?V ԉnU>2x/TD鵰Y%+KhBㆶvX|Bl%!rjkC #]@̊9ۛlFoN .%&'8ү [x2U61^9 \ 'fiOyKt[ Cڮav5E!=vaՁqC.„.5)׼j'nlSmiZ2#Sa\#rZyvɏz-GԽ@\2$S1=K`pPBi |qH6}`!-ZZT0"Skš-O=:߭mLSjfGn6"qׅ| =3T V SSqX~+<+v\@vʒr{ZpQ*`1*ּM! gqɰ̈o0SNlї8ZT{ Uggs]z($ `9$彿]N'A.xe9ʒD/^({ `Ik͸AV̍iLJiRhkl*uސ"ͩ@PҾC{Gwn+qVff8/d@Kxƥ!ȘTT?w 6א0í(WbJ$\NkG:l5V8ɚF}%m>x_OiS$i\Fn-OFab%F&Ħ÷'.V#|! }y3,~h?=wnBi GVXwvIoWe̢nd!;=a@u15|l4[еC4Ï׉|'x՜=Ӯz 1N*0O5@G"8R4KRK$bV6oTj3) nѬ;5"kUZ` ~L5X$:?lr&1kQ+ Un 痑Ri[!uk5ls']FtN@x+^M]2hcqo m&އ9+6.hݵY+m^X8$phP 2L\ ^g*7:3}@ e7;ni" ܬh 4FӮS \(<3H||4|8;4Hi1+_REg&fmZc>$tj=^F8[m:bׇaIj Jl$K}%{h}H3l@r5X7y' *63 hSv9B:]a\32@#Rf`LΚ\#S%yQDHTlr}8,}3 +ɣ+Wyxx^m1meI5p\qPEH;<-4dͣzP4H = t]-|l>S_0`'>CJ.S>3L]Z1;f) V#~crA9ݼ:hBx%]cF.k MpkŸ-ٺlt $ Xkt͉< &ڙԗ'~s6,PDco!Ʃd9iݓKIS]FPHihS$Zޱ*t@㙻؇4w2B9&;7uȭVgiԱFUuGepi-, Y> ٦d7VZ81A3Xt+sffu Ћ0]Vi}/箕% ;mɘ:1UiGIyG|BX{#:#՚}[kأSwX T b&}ȔsTn߁&1)M31atJ~[]UN~B1s2uyk7ucB\?潮9DxmXC@%MzY*߻9r-p]TQCVH`-fCgu8 i<2.vڲ2i9gWu%d b|W&-Iai.OjZu\z<@8CQȢ/B?rc9MEI ҫGji)xm{; -/;C.aj(GL9&Wവ ;LOG$q!u|ޔ2fK\s0z|2/e2|(; $l=a))THt ܿ*q-\ۏЕ+'㻡mLh(+_^?跂ąw=p aU7ӊk +q8ޚWWY׆_bt;F2Vnډ,?)qU؇ޏyҊ]Ƒm?HpBM,#)w;ad~E :_#|k%'I׼tX!u o~YΤbSLI(|òFI̕O5{c!`pQi2 8 rYjYsșfN./Ӷ-˴06Ԟۯ+=j`xKGKfӒ߅c oNjF4xF:m9v?_A5[`;+8bRN?3k:C%di$vӂ mY*Loo"vaZYreqaգ.%ZJDi5[)d5/LNPo캉V)ɐHlPАχ٠X.?_"m$|wyf.'kf;@n)UyV5u&^]'yqrQVu8| vFpNcua7?mpѳZHbXoNzULa ^CuYo}q.GF&X{~m)#B;,J\ n?,d}CԄٌyFѝ82N`ʸJ + 'Clw؄Ruey3 )<r ?j;1/)P U.ȺJP0X]^2 k0׶h/YYEMiRoW926.ߒP.ZhUm9lM`Ľy@XBOjnN5^{B0{B2FM(YwqhMd53w#edOo99Ag֧iQPVOTe,Y ^Ei1ue:gA@8O%!ꃭgaE 0JY,Fk]ou.PTߣ'=U$üCb5룁 Ee`E( %0NZ-qRc{⶧!f R{mZ@oI_ɽ' 3伥F͕c˾V@I&\&#yp1;2%{5Y, )ȓxRb፣Adtkr^ÎϋRmԋ[]&~Ѕ+ @{}l9a<ȍe@aTevo+E!#w-XGy)4O}SK,rf=FA3r#XIV~?w0&x<Iy5ި.A2Za\pL6,y{70@bipw̌T0i3Oh&+уlk hlm8OpλG9@$$R9LwW$٩;׺TQ۰FoAOvP=Ɖ,85N u]@ĿZJ{rDyw+đa0N .Q;~NE3yq\N+#~Fi(#0t JTlEq{qi=.iaXGtY*|ubXuiOcsY`_2mc0UzF6_AfLNWΥ#Ik9 :$c>,ЈVAdJAnV"掵G,"MBgs:RY`MˏQ`}%F9eU/;aNzD4k7\>Qy':Ley!ViB,r~`Pߢ[rƢdY,,J0S9.|jpsVukb'PJ.@hg~4mŵeCWx_2LPH_J*V "Z8VJX׏tֆFK鐲؍e{#Mu'>)꧗'3S\xQx)|?OCѻ՜@~ p1ޙ|yPfZ wG-6_hIE;eo ПG#{a.+ $ eiz 
XN72qPZ:,wƁo׵INIX3 _P+kX00b.SEnQEf 0ϬU3Nhˆ'aE&^k@˽UG4WX1 XN )̎T TpHFBAŀf cv)WI#2APfNZkV#EEޒ:N7O JTP^4|1&%<'.*Մ!U4XR׆@o%@z#mQ'R)ܻ=Hc+rU 'NR%eNSH_uKZ$mitm^~xkKUQo^sRx-/jw t+PLzs|`hQUC%"vJO!2_%/[* =5C>226Oy-=m΁Vq8iU C(@!eF 8%plYL׍?o¹O|~jj;1k!e]\$[AE܀s!g N]`9_u w`Z#%}ϧ04kgNFKyuw^diɍIeotre QY]֋ί2hWe(JmdUyOLm"D4@ɘ}S(Q$X+NG"doSZainrp/pů=V]vX@:7C ns46rD.NGR+]x5e(?>vf".K |3h~5]ON~cs7 aK ~DCw2h+d2nCWI'T7~cg([(RE>֑ϣ 9rc=,ʾo&).ҎO!Mވ1Ѓ7?mp}Oh?}] +瘲ojp!!` R&ZEed&g:MGgl>zY5r]2G;-IﻐGQu3^XJ$blҨoJ-034P`\.g_t=L /Wv`eN {hQ8XʒJv(T׶G~_0RYӿv֘{H؍ (ПbЬvx'HN;F0(I=4D{&0 Aj`Sj3K*Cau6pɩ;br0nH;+wx\ tA͔b<ySoI#+kI]r9=8- I';~3S}.Y_ۨAs;̳ʝ/6k= Ō=!0j,hIK`c>OjQ}GB%E Ny2GwB@f<~,BezFaI}Jf&sw"~tcě )%`+ht!@p ÔL@ UvqA["?|uUY_W7H95u<RF R$K[H. ÿ͗2IR0\}ɏO(m]^<)]EJ{LD<&B"5،(9o7a4^ I89I`yx ,r$Xw31̑UYtVtqڹȁYDc!(]!(򟣊!d/dHeм: "sQLpJ_)V 0R)Ws-~e%`X]þe!̄SjM6hش*ɦ)oڦ?+aGHwL=>jǤۥOI[F֫t-<`+H4L$u*<0ꢢsg+̞@.3dɄ <[dNӶz98ZABb  R0 Ŕ&VhM:3eX@)iXw~atI\/Y}\%;q*z9fX.qrSg G *pq(T˜}s83++&8PiaaLpM1"4f(l x}[Y YcYKnU0ɹq96Oգޔ.6JRQ{wf>.WzXGFSLtw3翗q'2gH>瘼Wa,`.]-*UT9r3*ũ=*]Ibaᵶ?+^YݝգN`xygzB񼻇VEX4vGϚdrKb9(|Gi <,Ϧ 0gSzM jh" _?~gdf_ ,p|i3 NbxgYwU1,؂D4Q]zS?N>mgbE6wq ǀQt  1$4X $ͤtUR 4gq,U5Pvq{:m(;EFU˃ldANP#̸O \}HX%P t)t2:ZWuSq$'OCp@kWpYo%Ԥkk- TtwpRC]19ҪFCs]Ve hjwߗqV*8e`=#X -lot5"eA?_\$% ,-05yG ~_&*Nie]Clm2t?/J#@}FQ3P+}lܪZJ*) ,`]#'8X#>㈖Q OTXA0"-Kgy1v;pC;bUV,+[M^fPqW̐HoZ++u_\τdC ¥毾dgbD5Jbr{-oc$` V?9W;b5}EFn\є!=.eH+sZT#јvPc@_ CWndqNj\$V F&͹²a&nT\Xr<vyy:>E@HI)Յ}2E#؁! VkYmKi g){t!~?zb_7ܞa3u/-Oy_T_KgɚW;Ѝ}yr WCـ-۬l^5|iٓQ7_\W %u^Uk+6g*_bM/oj~e f6'2~Hţu@n,\|_IvMC/}`2Bolu+wgiqE3Vՠj0Fv .5fSK{_<<@ O#ޢ%׊f*B01Ϡc0#b !Lb1|G'Si^B`PFa;R" D :^u, ϟ{vd==@*ܹxylodpw;佉Hᚦ">ԥ2RNF{bcM]L4Zi%+y"_s"`_e"(Ўy>KQorA ^$8/hPg aL UM"ȞGV5 iBKN\6>#ѼJ4ZYςѕITeĮ5Eάtfc. =ȃ~&\-~9psy5m(py+nvtTrjWQA.xÍ؞|rP?|1q,(WQðܣǿ|>i^y6Ѿ %"!e\~z]O^H˝<? 
6|2:>: E\Aټ6ZMV [~fܒ5UjSJĦ.%<6$;^ϣZeֆ}.XS4qRC/%p{*Rd>O%`5=,] :i|807X5sSac^{q*>ʿGPa C2*0(VxЃrg+[FݭZ]]Ai+pN5nUʷvI|s>[䋒1u,~l`.K_k_)(&IT+( L%+12o𠎀W A YD2$H#ctaܮq|&L8YW%ISTOPe^[hd8zcB'~Vj]h(gWrk=A>kV#YxL{Y`q럸Xݝy cǘ4d;=96΍;}qZP+-x$ Ikރh@ {p/Njgxt])H2v-@lh]T,A+|MUÊɾ?EqdkdD.O"hPzm VWb'ŻyM{؀{ʼnFb6ژxIA6 f_6:>qIT.Yc s 9&xU>b%MKq$@|([M49ĜXiry.N ~VTT׌mp&Hon:TmF|ys$'ZB"nQ@#ti5 5rAo޺PΙӧx ށ05v*JGp s57N>ufIy'a%DΰsKgxyy*;(;4&"BޕEugNgAF Ʃ"y/s &k&FyƟY\ckfuԐ4V;q{\gUж>c'Qa|82V1nEO 13T/XChF~;M\Hb,8`Փ֡VbOjԌ뛈ȍ!f3Y7:}Ѫ5]Z!!_GLڌiy sj]Nt{ 0Aጏj XiKQ`[䁼 :x@e(8bJ%lUᲷpI 8* %|I ^<~щɺ֝ϰ<?X:1>lx`1j2.SQ-0,Cq2q ED|C~hRcf3;wJl/lVC=lþ$U 1dL|OB\6ПٱԾ`5籥|-oqp7W{.ε3@}j(788) +Ly L4QIcRpٽ\< |Q^hC}T N,SOŷg_%bUoǂkR$&Qָ*;x$QR~~hKGЇdu,}8@:qP6t"уf/Vv YPnvJ=b!1vګsaF;_!k(-cyOS2 G>YG Ů1w>`կ}]ct&.SO3@iH̡>@[ *-({*B"P`{[|HTMgo:V.#:p8FWoU7ۭT>u7ÏA'%y:)hYVϥFw9 di،iÓŀP H~ Vt'!n]}RCt_vC=+m.ײnP`].͛1Jb| *sy\ڄ0VE /OpwsJ9n?o !P}5~H]%tJ.r25k앫JO+7>5M~'wV ʁ6Otr}!%,w˺Ð\uf3rQK[駎VT n|_ʐ\}TgQUzaOBz-zΨ/v t (okq#}F|2o`6]5wK1)?gY OI{vyv=1gӋ+c 1ױ4xB"M7[D{\iYx3{m`3 c~=|{MRbLZ[nؤkHKM{K(ʒ7^Q<( v![Yw*SqϰEjwP8mR!QTL(pGZQ=Q6+Kj7 |9׹ $xUfq'&KLiZD:]媨d(6Gz~ESC^s)2Qguؙfw*C<FńTD,))R@|9A[~wuJ%Do0FӢr}ߔ׶s Ao;;qC *G%P?`` PXpr#oβZo>9Iǎ2*{C1hy_8|$%<'#y@UiRab5Q.qX3Qt'}1uz8ILK@b{MW!&\4dzczMh[8f-vazYZ{,-E I~%g0S[u _axGh?_RUl Z_lZ42(W- ~EL8dK/iǑRx}[!Oۤ;5Iud/{5Ѧ3_5b^-03ʋLcD&d&vLjf H,lCEJ[%2c 9Fn9+S AsKIXS%^os.^!`dt'xV;ARǔ60a2L} N{҅LQ "F [+רڒz r"{V#׫aḫu(qqe| -@,}7Pr֨Nl. IΩIR^IPqNg$~^G u_N_By+ӫ=ٸ=*E=(\9Zd/ä5xf.lbI#l_2E[ud)jjaJ.t{f:U뼍nٽK ڥi'7}*V$vc2v`VYF5>/Ƴ;cdܭ,Wu+aTtCC-7nmpԱ MlȉW @\unA4߻ovrwgE\{&O.G xaןM}G)2bA4TfyąXvQvէRΝv2/+1[oڟxĤ"}$o2.0-IpJ- axЙSCfLXZφA#NC##*;&Y/ʻH.Fu։D; vWׂ0gODȸxqT[1if< CԻ1=:ݜ 3ю-Ӟ}ѱP I0T_5n.LSVr[ <A39,2"VDbg0\ D'( %2)p<_"d_a5[X>~1[Ŕ946oR?w+"Ϳ)u4|*ºd8pV5Ӏ[MA 7Qꪆu/**94:4lXXbZil*^ޠ -<}:E>|L#bHL6!VV>pemV]MӴtYLO kv6a쾆:XGSTVo~@;d( {6ʗ4PۏPB k'tkF 52'9Y=לUB~d$r(wl hOIO7C0ֱ dq/d$*,KAE>(lwM6 /`jW 0e'p>x"eI!3G4x=ɪ~T{hzAC[͆k) 'N>Pch1, c~D,Yޔ])$RpBٷq$zCLףt*v[^}T/{) 422Ƿ\4[?.SukH}H¬vSM_^fW%<_lȊ=E bJ' ~NZ-M V?kSd@̭!DtQ~80LKj"YM2hXµIg-VW)C#kcΠh>-U. 
ە媻Ol=IR5q}>wxXÑ6alv(k{] ǠdL{ M:VzPlxCr"P-BwIAVkp_I9}..]s8? -bդK -WtӨ4.k{sкTmpfhSm<"=dN>qsN#9ҿwMiOV+u1.j +XYq}3VrzR7WU iwgohiNN "]Z~UeZ &mW "}7[9'Ndj. :^P|Q)U(Lwf)y@c^ἌFy"(sB+/*hYփFd<=g\0g$AN*0,ͷE#\ gGYn4zZ]lx_4_yφ@uVe<_ͺ[(eמM%x ^TD> h+^_NS SdyFYaBBgׁ'} 01ԁ/B wEaΏ1.EX&Xh*,'';I7VFyK` >=, P{m|Sj!wj-{-_N&mwmG u(@f],G, )=nao_d/e{2}62\7!"eB Ǎػ,=t҅6rKD 9R? <"Quܬڲ`00OhSLEGT+ND(zMnh% ^ܚ됋}TdfOݗG}Mz(eԷ5:!?9SD p#g!ת炪" ٓZo4CZyKmҀ4*YMIt+A z?VՖ6 a)jO'Ty)T5A na"bӌ8ٔj\][wNAi X"'~Hi-ٻЧ6npYz&alUմf3}4GlN󬆱k`?$Y 43>7z;^_z-&XzeQsp/e2AH^fU/)UΡ9g+U*O^x{5~G 56K9HWL9'=T<ϐu9E/o*Ufpjʣq5('|8"^-:G o(6Rf6;SrEф_Ws _!=i(otR=(3g6.Řh0l+taQ2#I+|<ǧDP$EazKt;7zmEr8eY5= OECU:-J]-X&Q$J_'_8?oNn>-?8W|?>voۯ2>7R 0ctD D`0LD@0D` ̐0`X`r F fɀA2#Ȟ0d 3  fDaA D3a#0`& @FD`F D Fd"&a@%``  C`&0"*@L 0@0@(0Ff@#6 2``` Daȁ#0aC$ ` "`0` )st#"iM_N{1w0†GncO3g^"=gF+)cm3W@^ sBY`7$nGpMnAiyTaهKcک 71(J$%)K*ˤlU}%jѳ{XM>yrǬ- A["?c P~|g|V 3[Jq05&G 1oYu_~yu!Y)Gת|Ғ%-d6J=5CZ)P[$סY>eGW[1YDh35KZ B= {ƭФQNrےrL - nJf򻅌Ih ^EҌjI`vLzE`::58 4/%УaД({l$%ty B='}0>WXp 81Flvu[=ej]mdcBfjiV 3$oky6Lt{Jxޗc"hR%Lm̈f^L#m\}@5(o ~".6#| >;ehm6R/Bmk`'<5Q "liCUF,UL`H;˸8vso>wt4fOPy~\CFi^p5-d ܬ&S[B>Zә SH[VZO$hǛ='2qӐvM%V lR6PM>@w%PY>t s3̢ W~ FΜCk]xHu`ޱz٣su#(D=.JۄO^ 8r UjA|uzMt"z0ue 3OItsDY\N 2{ݲ|ɳѽĐs],K2E3|W }Pݏ3}-M2qt(>W"us+L~J .fU*$>mmY#hw H:DҺi6))4CJ`|i!Xg-e`&⩍Iߊz¸Sl^o/*Q` ho:o#,#HN@?dQ9BW0̓ GN!'T'e%;!WQ.cZ0d%sss6*cR3{7' oV%\X޹۸XIcGy\.8]DžNO{p1 CdK/t ԾfwJK$L&u-j-PbXB'T v7]vxo)Hr}tB 8?G:Yħy'@Nǥ##_ÙJ;t jh,jܮGr` +`TZ`wB[B">yŷ& |^J>޵`hI 5R O9k"z"ooZ)'T/.dnn4-Ŋlao+-%Pi&[ :|iu.gc4$DH^96)b&e_sEBR@ݻ9~fFݺK]՛a5lc ??mgA֘pf>_|D.N^Ԝ kG]Iw21@ƣ!z Iv-QZ{4[>9N΋fhSycʏSOΛQ5u wڃ {|}Cpbk6CoUX$DSCqQ3B r\ھӦ/ 'ޒCd95(e6iگ7}uF>ijϩ x`Ϲ`'Zl>NOFMD1a\L[m]B-\Q@HLInלH Be%-ʛ$pYkAi'ZX>1mkoj(^yAn$(  GٷuR֓E6VVUGd2pd8jSܝq֪-ݛ|d ))!m1O{Vq҅.QU#rF[kQ_~马:eNxgȻ6s4庴X*(p0;«!y3 YdLy {R6 c'D1uKzz6?27 q\ITԤXhȏ }8gΥxG?;MD)}ȍ,D.}2tL|zK%ҍIñ\D_."0 m8*xXSJ@6=|J'yduE0n~#k;&k[Oi dO0RZ_I+ɳkbfR_Gu0_";S*S}o:gږTI~i\TM>/2G .}+V fߎ?{۟zrH&M!{s #?(7pEU%hs)+vj qZ'\*#c2$Y@6eb]XFT Z^>y>LOxl+ e#їC8hyK ™{0W5Qd[Y#ַA7Q :s, 
8Se7g!`7jƙs>h;oCȃFI%喛ҊTh/cwb |uGL%!lGqV>E ncL67h ۟cn$ x>yYk@:Pr X-J{KX:RvaIL+uM2Jp{ΎęnDž]pkPQ LN%49`ѹV'#*b]8J[M>Ǖ;Gl Bk0JR^]ns*q |6cՆ# ;O,;Sx #4ɥ~K:fodϲP>љbv)Q:i l5P7<7hBC.E`rm I5.4O^Q:`nHM߶ծ~(:"[JGo'/Qr9͉KkRQ]Dz*l8?1դѿk[VoYcq V!-Wm-W;%2~v>d%n׷ Y+w05t=^5`siޗ@DbN~ĉ<7* Єkɜq[vh &!wDiMʷ$#Y߶KÌaR M<Ò%=Il*w%ђvZUcn~p_9WLnKGbA jg5v?1 ]^cJ!1fbo^aBCkvfO$Ci]4Җ>wapNu(iJCe E'nkY#4{kuiT[z s)G⴩ڢy\zƺ̘w(&[3hii&(s S-n|M)5qK5'v$y0EZӬ@RU6' ?jP {DgG 6f # 21 Ca`;\hRK*/6/d~Z6c4NOeLK q!T9 7iHU BRqê;$}AӼOn`)0Lpq bG E*ҎqYyO&^Q@iwd:?`(VFrv^p+i쒻rT*8-)Iw Obo10}Z0I $:(tmkX$4G[Q;ta\=ӫa<,jH:x^*}fL=و0ˠX4փȍax*W_-p&5Q= q)&`_H:h *¿ }E? W*Ss4;jB X 6M JyC^~X 7@ gk}0Bu33J&P[0[A gR[å2Gf z1GzobKfoGL_9M2[>(mȂh~ X fBxPoq<5LaN~*P`H'%#i3a6lk;hIM͔TE`Y Es_NRI')חB9R-2rYsXL8~)z}-7?:{m6áhgFVw) :,9K4$nxaMHSaRY84 l7)7-{.\{F,> _c[*W&yS+nciE`^!1*.S瘈 -AأŅMKXx0.O6q c~?Y;Ng [,7@/**ꟴ<iqJfU(ݫ)Kmj#3"_Qf_+QNCr}L!MjUnpH LሦK3 Tf4{gJU0 pr67JKhOg45PYBXjy=*71U Wwnl׉9GOrv >0_$ĺ׵1^MPԹM`:+[m8դ)<>DN2n ]~ƥ} =BfOU7y:uwiN 0-y P)M óAinG!xmڑ,U د;ҕ9Џ ntlyBvG#ztQ7)6j* z:/hA*i9B.E~` DwzB-TbD3ZWggV?%Q$؏OE!6gn3=@' IEm̀罠QEMSWW"@eP{`Funl1tWM=랦Di u5I:aà>Xm\R֑s -5;sD9>_'VɏF5b8fe̗o]t^Umf2ڦ5Z\ӣ7~o7]/Җr P%}b^IB п;d<‡'dחHQEE7UٟA/y}6=1XQNꕄU v!?h9'cjj4`jn%aP=K˶cQAh=9LyYrLd6S($MG)׆S{DžeK8'5:2LjeņLCe-w\T,2 pjw(G-(!*Z]:3U~j9E(E.} pxl? 
dUȦqɏȠ3]λ TosùiV}%3CȋHL_~<h,Wa"0Ngְ'ά2gHDՇbѫ⡢ΘM4+8SmI\ӧ-Jw%ֽ iϐ5~Xuu_̀zw9bg?ГV{[jG*7jA}oezFPjhi%D~[Q{ఄ<Y(KRp=@ւlA%(2 ][Mֻw;D&d+ b92$H ;6 ]3x.48`Z3 /8ED_vP mGN!lK:":z9}Sp{.\oh#C6&õbVu#%q^I$-6֯Fg&%qG/^:vZS4#xt]2dDkEeyneL7V:X/+6, Ү59!d#g >&G'_Yv0,s犋\XMED^jN:+_V=\p yTẃnNPe8`w1:?sqn]in(P"a6,U3S!˳p[D)@ Y&45VeJƂ;^t3n/}` # ԊM ΁]>#eBq rR'J ֕*8($KWw+ky20 ^gR1V_+xA=X5) 6Ǚ-S,ASU^!&ijq1]b`*9oHZ 3u bQۿbDqrkuaoȿ*0%?D-!ZLy⒓gQ܉ U̶|EmWe7FdgPhcH0ev3}o~i}IvӢs] A.E 5F7|؋l-Q}r!~eȏ Br6!aZg;F|e)l֓uq<|XĕG+_N熤O;_X2P>os+˃xS^n|rJmtFJZc!v'e-q57.;]BX rfnӦ嚇͂UXA A( !>n@T<"5݃M[fM ި#G ȿ}>SYr-+Ѱ~^M5LLuF O8=55ĵ$ /-L> EI%[>V+[(L~okc*UO[{'x3kkܬvSW>N|3FʻBY ,f0Y1u:/ʧl Byb~ԋ N*Hh Wr5ujm ZLFXbٙV{!)-]Ѯ ܃>ڟD_1>IʵejQc|'}yj6p9㉙6Ab.&fLqS?&-׉͐xj9?5Bu݉[k$ux4#d !8g}G'epZn^9<Ī0uL@j%wowGM2%e,9j7 T5mנ4'ޞ'9%2.Y`=VFH5oImzt.]v`/5BR2(?."9 T%9b&1ak#e;R}_ZYw-qP)ˤZBJQLxtLiqB ܽA< +n9~ l X@,Ÿ5:,jYGGw?y\yfW׀Yc䲵\IBZWb%ug؋[͖8`/-:uHǥF+?(tn<Ge5z͍֧v $zI>tXˢ__6YNc3H5MGv|dD֕1h򯴱Au àA:ÅNXΑ;P4"^ʁ(!,{2 4Ύ.c eL lm~{j*ZUHBʊ B=WUMzzB򀖩{+qxtbiri xQbSǛ-f4ӈn]X5Q"hњx[9S#E*__AuJ0HojtOJA7dݺuG>PTɥu7zty+&(mO ٖ⌇!=eLrI`G,Fʡ[J+P!8=_n#i(Nׇ|%?k)ƗRs|br|Q2ucSZm?9~[1vO |mXh:$E@ |d ':bOY0CZUu[W/v;3̡+!$w ̎BfLFDVƅVAXlrPu mnmB !M[o) &gvewAۢ,ź'QNkSלD5g `"sa.=7"{7GK[*yi+BD/Q?_:MЉB&㯻$ВxQTy$˶7tL/MoYK 8V:dUO3"$٠n 0иp:':N&zTQA?ܲ?P[ijaaQ8P)Җ16Ⱥy;~ & qDL!ˇ&{2)w'a'eVM7GRAQAET2 ^דg(#meгûPV;'c9h Dnƒ0l^&F~5%rh!9:%hڶRClT$#nٕ7Tgr$Bz88 4ק3jAHP`_ݔ Q2 x\ۖ`!TN8s<?V/Fd|E[HD|fYO1Zk&HThe*ZG6n(ո,"35?ao}lFL8^Jp9Vư~4}'=a?"6g^k;}F^# 28g¿}2Hq6ǟ.?o355ő=kLxѱ@8T`jF8cc5;iHTS RBQ/:FVǓ9[yEI ֏]ӳ"_j0,EHˡãG4Zmr;CʮZtSHNĿӤyB<gzCztb@ n0Ϥ_.@2;0ĴB790͕B^Ld;{-2alG~jA7X5e!,j0\z2C`!go z{dReH%FSy $֩+,.2@U8tI*4YSTS)rXPHI[)tV x=k8>͈93ݚEͥx5q9mx|pHHÌT806osqe8'AV1ժC1/1hy4gs|}\ 0$޺ )&N TL#$fO~6NTxYtfKcJ%[֣6D[Ʊf:U;Jʴ_,/$)<ǼȔ=ߧYm0m)c o˖?\W &ίq-Enh=;BCP82|?ð6 woz|ގ?|PR +ZZ"GT EV;=VKm3cDM#| qɆX(7]7,ݿMG0?0W., j 3\G}E9/[+2fa;D"N8OuDe΢ʅyq>z6츱!]Y{ k4)#VN{]0q4`4S=f]_Ё:GF WlbN+`:#~GmbٚNj,SP%aALBR0[2I+ wp(7Nw ?swsɄ@j3.+TDCҨPXj21 4x5տ[Y<_Aǝ\>Zߋ2@s$jRb/VOl)uu@nkl髯^ ,gquڑhm^y B8VqY'oSHy+Q"kJ}"7HaިyEGuxBۏ%iDY|fZ {ƌxxv`SB 2 
s\U-ĊM6{;57U=u-3GJk[ [u1S`A\Sr g<6`%Xk*A"Sk[ d-cX̋'>se$%"kB."<^aE7B=ZlcoO 𳏼 @k?4<yb>!nӄ-5Hم7Aۊ1u"'ϴFh͟N)Vj#GgR!;:ÎF C'd-24_թmdeo3d~ 濤}nNfv<\I4o@`7c0iZZxiI3AK+31ݜS.xH} ,_Nk|UtLŻk(;o )Vg3n,ehO^ v')Qk;NU^!G3蕊%_;*P^X2,OPYQw3 h NDA^ĮJJ;N׃;[5ijzoQ쿦XspzA5uݻ ")mEq2ްy߮8OvןD߈g&m"G!BAl}@ ^ 86vqõy!I-G7~n;oAa6N_hKkR4\M;02߰ fBiFՓ߳AvmnP}'J(/G }Ő'_nk=)̊P|56F{ed]lo%9tIߊEyR{[|~0:K.<3HB rh^-X,ӻ FN ^ ~E3`hr(&VnWmL䊷;iCn#gdb ymz|h-y2vuT6 v` :OgEMh2 !i֓܍X4Bq4;O2I8OIp2{8˛&y"/#bwaݘ].@si@tdKu˟}b/Ǣ~8Jsy;6ڪ8%"kDBi yꗀ@$dXC3967UH$b#%vo[ְٱLw?8Df^x[fʠ]Z VPfL~T-iZ:N`驨v5'>j'KxCw҃y7hZ֮"96U ]QJ5][muX{"V EtO`{x*q,sMz#WQlT$ {GWɑxUFۨԨAjڹ8 frSWd>N<F'-[.-=sQb mAef۠d%H#ǟ:{  K9E-f8gU%rѱ1V"\8[/  BÌrQ; Z%4f9@dSr!-|{jt`qHtUsKBաu A]gkQen|ɢ3"!!sPK.%aM2Y &M[@G 5kn+iVhz &@Hc?I'1) X+pZh.͠H xl)$ORfU[6Ҍ\jd ׬CEO]1 f%!㽋 gQirc.!K̃xۄmG{UO/*Q'5Gš?K?eguJ{.f4ycߟ:=plT}uWx"FWž}8gH_!aA k0]XI:en4e]Ŕ8-G+,AsՌȊF p$90._V;Ic%n鐆p7 e/߭:' ϗS?نO1Uȫd9rLFPo\/ " 7k*klɑX31vD(yqcJJ&fWdWN*.K41T eG4efozV&[r R[NV0G0cD7wz6%u UK'HU" \o/(Mm9[.¾Lޙu8zҿ~lg_$t)=<@D:8+nrmq .>%JD1 ȺGt]=rR vw67qRqE>ZӣDy=ꫧ$x.mzytC`ݫ~5Mkx .d9=ѿ(cډw9}+`Pi rqVm+0NZ= B9S#e4+Y H(wQMvSBMF< ׻%ѷ=ܦ 9A2mц,K{rH0=Ѐv\S>P @XID YrbnjwtS%ոqWcpdDΗWU!FO/>c`F,}޲2`.L4KĢWZ"xh7?Zodkˮ,Do'>zɋ z UN&K ݾBxux}P20R)rυ?8>XVcxMɥ`P=7 V7F#j${\zꠇw΢ Xk[8`0$kv N C*4&CO ea;tQfE a(H 0q9/JBp9۶Q`LJGTt̗A[*&dp9ipѲ}]4 <\5VZ?XeIc]ƹ>eY~6yS~ 68Nv/=Fclktd--(ziY]͜! l )(|ɐ1l14.3+Y0o׷-.m1ϺUCԈk1%YY"ž"J9.S,4/3}f\x߀GP/*-1#BʅX$YSzPgS8dKf鞎 j"Cp~*?SSͿd΂~LUhLkOliZ_GcEyc-:WAPdUjeESt+5A[vvP/8!!Ik? =fO ~zvW-f$⫫ᮟv::v k+U1G]tm_󴟔l_.cƪE½cu [!II<ӽ^;lZdSgL:}9j5^hǺ a-Y%T/dz:]k C{ 4F7$|}сPxb] ȳEVLF34܉jGsc:{-" ~)j1:aKz!à`Ϸmu}5sO4Q<Vv3pp38kZ3 )lCdR KJa;x6G7VЙ}u˰-J H9?ui1"D&ϰ2t~K֋ ]Q g%SM1/?äe=p{*! 
j-n" $_нj3nrxBnM)} &p6'^=b7:e2?ĸB ׼y1l 2WeȣkȉiݓhJxKR;;go`7۶]#fEny l0$74` owPs{:I8A:xM"Y~ G&5f=֦>cA,r oX4!pX5OmkB0ǘl(<2J%L6Bb≮Vƭ,_)^(>լ=Z8IWo@L=Ho_oLD4uX^sJDcBu-_KgIsѦSkKv.]1R8mY/NmY1X0Բ-_CB hC?Cr.Llof"`UmSE.tPH1}Z{q`4mlΈkʶϋ# pHhAdBkfbB3kLyfe[H.vX[M,)A2VTj|xmү*ugEEvPQpRn 0PSNj&WYӯ:G,(lz2,(4Uc@iC%9XasrEސKI8 xqt)8m ݭ\zp8bg>JPz姞A$M~Vf|7bҤX^f9>p+Ph^ޞ^vF&.wR\5ZԪ,Ä&3mɁHã~v ǩI :tUɆʌk`.R?iJIoa%VZe6{߭8Տ9^8B}ˈX)c%"2;KhEH;I, T@Ԭjpfe_)fpZI),>o.Ƹh,Cn{vrg%. vp*J!p&r~>f/|PZ|MϹ2_|oȋݼ"AKh H-x퉫Qx0 ʿGK 0'Oފ>b  F!d9϶}(#E`#%`K+ (lQ2]K![@M$ܚ9OyORa ?_1jmR)!NbQwmqCӱy T*r l 0M_( XB@E8M4W#YikA$.1lg~vzd#ޔRH}XƳ-7nsȆ/=?=8fw Ӓd:7}7t Im& H?sF/ExZۯ],fe0cYv !nFV*Y{paeu[| U/ucJIv.fF (~n~n( xǀ `OpbMoxeMxwLN37xmMCI7e6qcxY$L!ZpIzVtp$ ShT1Yֹl=,&pEI+ZWwI0h28>s|'`4 ֜A1XU?{ VxrvOϕ[X6swER⏹BHe쵑k&۠Y[|h$!o(pSAY3o-̩慈۲唝eTB@fHwwmSVǂyj/}xHw@r]NSaIuwͻtC_LB>U6_[{aNL"^7ɝnKDg9]a)N3e**mŶ?B|p >(`':{PV^e4v~F`KNFEr3a,(MǸؗ B > |Պ}|KI O#58sJ/2UQyF] ATw d.GƘ{e6@t1iЗ5g7VSLUObޛ6I6T-|䋿D.ܼbUΉ* zHר׻'1ܬPrkD 6Ѧ~o1ҷ>eD m%.'NޮBVqo>"2e6tyLQs6'A6/kn(Xȗ|jKze 2U$=0d)m6"Я6cbL0=҆ZS\[Xܱ`WJ3[e$D[U>{%N";-sWᥓrzҲ 2Qܒm\IzVfYOQLɬ4Iݖ%jeDŽ礯?(fpΗ>@|,-|9rq h +7I}`%kCܫo5ߡ-@䯁!IҔh,ܣ)LBW;.ˁ CeLKӆi5'Gf(qommrTm_M;kA@waڛ]6*N]uoķhkPfƚQcb=R [Yh^m lnbl{o"g `r+m /3Nd7qqqsx'Ø$c}chwԧPޫ'i |xӱ-pE,*^S"z^պ5')'ǘN!Sxќm ?PnՋv s뒸u8u#RjNU31fc$gF N{* 豒+Xw@gm iͦA:T7͖˗Cve+ $ fh(cBMPn}ș`hj;$B/0MFޒ HM<"aR?őeu$vԺ=&^5 M8m0ߪq%_,, 4ukP)3k l(-nY| PMŦFm,ޭۋheofF:K"[ t>YI`Tkf߷4#EPsks4Eja@j/ա@:RG H3 ; i0Kx!]Ud\W·mp J C˒#/'|ӥ:7 0k#S] 0E ! w# Zgf2Tgհp/z V\5 l2V` 4q2 5ANc`B'(96jQB /L\K^a1*B mU w>l֊hq׋x=ozI0)n}k7Fh^0B8#__~&My{1Q>Wܮ2eJ,ɀdl.?F+oƉ2q$*O~)!S"N[la]w6HgxQqf'`u+]I?9*;Qғ-WH-yFy>3ZS /[>A_-Qh{+WeŁ,RLŚd[hoL.0%r$4k +h|ߎZOjf|WRJ~ AlŤPJβ ڰZJ*ȼq&ѿZ82:HQHT;7Vpâ%ҁ.\6B&N.{Sub%AXQ롋(Uf/[MM;v;9 QY*TMh'zhfTmj5zKT<΂˘fIw~ea 4TA0K:-6I^ΚUaYXE)bȬ8aTQe!`39BԼC'RZKAۆF7 dgdl4C߷j?>ïP/|3fz?Vkb[4C݁D)/,ЧB)h5*NEf3nګt1fمx"B za,OkpM[sQ_wDה7r.>|?Јoy4qZq'"OGr+C)IdH+b5dR趺t}\-,ٶŹZ)/hhwl9#5{6;30?8^!Ha6uLN_\Ok̔#(Fi3aAX29uZtz&3 L̘R/0PBT ӵE3/ (s8q}kʍQCr($? 
DUn}qmbw7ei>gAbzמ;PB7Jf5wi1´QY|SH=:Yqw| Uk|VKF4|%;f|f92|YcsON؈ hBa YzO?Hs$[tx_MluHk%XIY9tI/6(}^'@ Y{e lgiκ*hP֠c6ÆT3"Q.ݾhQ]\\cX!rtR˃_OK9".'9vE>]çU[)kO_gׅatXe*~;Pǔݼ<{qF")s7#*-ϐZuNeToڵ0dvq:N L Я9N=ROQCB=RNvܵ/1Q0΋:,1- {c(ޝ' VC:D6'%F =XT5:Zb?"[P 㞍1_<^Vβռ-/|r@Vy5]D\$zl3btzd( l+yMǃ6Bax;apr1 x?7WKD4H'Ј1))߁&Ҹz_Xc@`')Ԉ+q;]5:9r aA[L{Ky@GA1b2-pnJ)oPK2VXnEM5EQWXH[21pcY=w(l8gVMt$hn/<!W'N&kDtwW9@zՍ!b@xwv6b@{xw-$ؓ7n-Rxnq~*s\V Xo+ܤ3Y6'(|tZI@gj64x0C s|2pGܪu7'dyT쩍LҧF=k d|gϛB2 OmviWöb.i\jLx_eQ q=cҵdf$)(''ߪe qZ+C:`rwGXJ_׉=QԈX‚٫HZJbc00D;N1' U$1ۅudwsEk*H#z+Ft'#@b^ؗ`,+m+/հEPAQICBH[ÊSyJ(FW,ZB Ӈ ^xN u xǃ{ٕy ㍒Uw"/b۹PA Xb̈́:/3ۘ%e"NFrlSrD53lpT6(6;Ҧӏ&F^qe Ot_2%,E! _8֎8רZ(ŧu/"Vu A N0b2ӄ1W60˧PMUAĢ1&џ Uj+Rn ZcsxcM|ĥ`0Y^[ӱTvʬc=6'! D3la0$9*ZWK*a -)YOq8wFXvn5F)%;dKb*Q3p $SjK߼|R$lWM'£ui8^R$uɸQ.{ڑf VX,=CoK]A^V&΅ebg5Ev3i6dBS"y&BxQx2`E\@PR@L9դgNGɇӏ ~s/YOud~<{(TE:{`WFSWMq:-^O 0 d27IvUƨAiΣ3t|&He&Az%T|5֜ɣHq[_@FSE4Hܒ˜?вr^\$Q;?3ޣ(EOWc<-nɋGl~cصc!3#KAI]'5D35aG|\W{<->3}4 u0\%hI~ V`wq 3fx+ܙz[І-Pq$~4ݥgX'Đ#Qj@2x3La?D˾B(Qa ~p> 0z@^5Yg8P;>Ic]IktG%,nz0JA0 qIevʲtt &u$ .-dDNg.k 7&XWEE1чg=Yiu *cH`+{HKWe9K ҷ/\FH zpL +ƀN&K1Z>P4uNy|_ Ha'z~w zNk. jo&}Iò"GNażAʣ Lvb: m|ϓL_6xx7Llh2@*:U孭Cgb⽑33DUNu%$E#D8 %ű̓5{&~&MSZ[_WM }K[mK?w]5_,J.|tۛ&&XdceHE9dYIc`#[z#URLc߯߃D0qPs!F6V:*V_{`W\5!w8;\W8MGMlf"s 15G45&h!l8O=}"YcѸG0>vs}WCp2XEMN=\DL4]J9xbᘲb(ƗѧuedRԚ É~CuQmq|bG=ԯқWR EV=%/SS z) @3:{)_]&&'$7D5jNOS_,:XAiz0/}%.:B ML;bo:}7A=E ,z(RC` 7oN@X7׏ pD+uR">5$諹B Ce7>Ly26\W?~#-ᖉMPDaDbhЩd-ׯlD[#;e|cK+[?,hgޔk=6d3K-8X1Y೔JKuOuYC-kB8Pj,g7~o5-k#JNs SGۮIRM$5HKgm8V-o uÐN=~R+fۊfT@%;U>wהxPەu`fV@]bvzR5O|뎸oHЛlD7~&2Ϋ9 mD{"DŽxyϗLolvC AQ1IF!fr <DL!ԉIoK9sw`{'(0jzBduZ ?kU=A,=Sq1*-)- HV=&))x'2ؕ5m|Dh]#u9ct}qNbi: vLx\- $ed%IaR/_iB,%,#4Qa=n(b:t{z-g X?EK}2G@\^1ࠉkSMBH/b%[D웧뎝kyHZ9*k0à,yX*kz`JH këOLV/*s9ݟO[ @v^O[wY|.7C)JrY2o1-X+ssO3D += )(S,=yOɿXTh̫ L>bGĥ\j:z/GfЪ1`%6z\l>S: ֪vM6mx˪A/2h`&G+rg,O2wq! #xg^ִ@2nPdm%~gO B$zoɄBmjDktO bz^c/G%g*wYtDۓ@۝܅L9`.[ 1ڑU 9=iŝJL$>" J I(Y A'(/bjmokHP u\[ >_&e=ҕcQl!n=0! 
O+JPFN2|-fHET dQI:sє+)ܼ˩oD@ű)6ioIY_NmK'E\~xc!q<ѪG8{kϨܕL<%H#{Snx(h/  Xʗ]Ibh}`Ҫ9KyXYm섓v)D cЃ]@ iS=F5w.2Dk`젬)b )& -rS7@-F΋)u|ņ41 =p.AD-WY^'v,XluT<^du}j e2܄0IՐnqW\ܚvX8 _rMn \a#q0?۔t尣btbYPy(֭d%/UW q(6 "S5Q32W:o&)W&r^b6fىRM/)HX"28$& h+.,%lFg bS\@ǨmZH/jū8T*ͬ>+jP.ؾ({{(8@Kԩ$?+)EmV0r˽-)"͡8FJs7 *J5ɥ>KdyBTA#P6 j87v'KTqXKP!t?"wbP3r HvP[A;ok.hT!h֚$[yGS~;!CBg7Q^|kja G[:H03#jyAy>-!1>AB0Bag!S1rt:XTEjiYʽϲ} [ 5*c:qV."Ra0O6hg2 .Xؓ~;pk짿Uën&W|S: 3[u6څᾷ`^`ZUj0nt х8ۛnkƳ?ӓ`tי7+'`ZP0.h7?=4~EۢbSD0FM5BMyR،ڼbv erpFJ\ ss8g"yYv)Vy{)={rixوd*({Z+s@zm_ƅH**Aag<o\i1W!v:A͉bv#u9r!ل hS2v_::-Qt-$qPErė&ŝQwj"{_ML'0(R]*¿3$l8' _g񘂩ҭM|xiߊ)+%$7V6fs#|h4^yzÜNu/R n:Өnj\U%+I5v嚒ˇzUm: %N &͎ĖF#_/>)<,dM%\}I/EC}@ňܿ._RI KfY"3Y*BfB0T0O~S#ZЩDJWe,h*F.dX^amI>B{}|Y[}Xn^ff̩mE.y6H#r5.P[T_{Jweq'avy$=fS/FL*,Orze8uY }FZܞl0!v%3yf /Ǿ@xzyI4~Gr.sp≢C2S۞K`a%:+ŕ!yF]q5rG[E1-ۮD04-W2 pDDR2͆e*EH{TC 1d!tڅ֌kDs@K)ZOGMe$mZg{.yA?JV䂖q5/z]|y1b֏WnWZ,9g6Lj]xs˓eX8[Y=S=MIm\D@#j_V3i$Ъg{&WH/uCNP9>Ҁ S̖"aW WN!f^?RA@汖, ĺ;({Vƪ®jut_Z(u_ċ\4 qbiC-Dy.1Sfyoh#m A^ a$)5j/\@B*h>'wCzwqT[+w9!&VY_iL" N’k}2Cd.V yry60L5 -19V #wl/kM|]2[JE:}{ۛ5MŽhA({ P:S^^Z Q%z3{rIucѵFXks ^FH/[0yp3onquU'yуtBOlF̅L*.)q)%Fe2`̷Q[M>`qd 7K2xXV9ocKG+-`$8]kěy@YzOatkBS3a9$fi::1kA69*:ec1i* J| XmZk[>c+3J+&zaغG'm_J!{]Ύ`WT SZ" /tnR"m*',@zZaB+nqBomYȧұu hs]|`a =/o+xI4MoY&#]*m*2M˥cbAtZeeTc3N5loqG*_||x :$ҠEp'_˷ơ! ,e!͆zaZi ~Ȭͻ`s6# \-G}֗}]Vi3::5Pn~?]CCӊ,X Uw^:V&CW%a,lO:=q,e`Y Dc74 1KVGFW#p:Tp޴=m n:&V 03H̤NRηu#aǀ7+Cb7ԙW n0$ZhlҨ3miNғ']RVf>WWH@.3~5?OP!glq$MLse6!SUǓ 'QO f&x1"'Pe wVz[IF2T)E\P[yI=Ldätz8ćOM\ږH]cq'I#q/f>gnrTۄ!8j1K_<5k' \2Pjt8V| ~!j DUZr ⏴WѝV0ʹIKQWFH8Pm{ypoFs4T)uE|8T^ `֠bnUCOV-_]C*0_wڽ ρޮANXx+AQ=TNMZܚKEe qj]u_\du#_*" gpF]^XկP=n^2v ^b3<{|֑bW1eecvtoJKףfM~鰌֙+jR -VLP fďm~2U\s6'ǜ{TlET>2d*Ժ-9Tq(Voa%t%sؾo))MV V{91cI P I>O qE82eLD;Ѩ&4u|{ +sg=gL]"² xDm+*]rmc̄_3ulcIQí؍>4)d3,ЙL{ya#7b2n=Ҹd:z Q5l N7#~ɋd.(sHm2E>rD`s;s Tn Qϖ+t=}՞G÷hgpuϙfv-8#4'abyOߘbϰ|&Vq\܂5V麷@0s- #ǫ5:,ÅDA}8wÚ>VXP9ʪCaAd8 xנNdpxT|z"iŖeHx F!LC8a!bB5!ˡcI܉:yD9?^rzEuBy, 3*^\K™$zRR[9׉H 4Rx:္%;wzhs\ n. 
$2)8D"ºĒW4;: `/kq'D oҝ=y*+~`ŗ纎Հdž}KjtZ Hq]oLw-Ss>.cçAv5A$= 1q0aǶhtJA*#Ӓ!a*yhxOQ6)GW3(QYY2*T^vOzzt٠F$/WJ$'W'glJ\eV^~(Su▐`7]DvbzO30JxP`tcJtY藺;5s| IP݃dwm@^{'b#[Eu-:zz-3Gю`}!$lP73$K:M$T#S%yy [yي* wlɽTqq>)ag)j3ÀuMjIut="/[g_v7k{nϮejOy:9pMUhņrBq eˆ# t"TZu\`V1٬;@]P+;܌!8QA=vd>Q?7/ 6^- {i?~t%ZCq ϩ-Eأ2Yξbg%D gDoċZڳ,P5qCqc\\o`6OC)A pzI3/nWkd"Ԗ9 aFR8hQIXq]y*%1Ahz1[[TDJ <0 :SrL)9bF`\0ۃP9}@*I-دL^X&aQ{-P;ow]E|S р>,8&jg|[k!zX!o{}n+T!3pu<\[,fD!p"2Xz%r@Xp6Y*Zn2 vS0e>h^'Xva=/Y\D Q/uoZz/`ȺRϛ8eeUlpQ*Rk}M7c; (/3wSD5t_y5‹( vSD+A:I2yphilo0;.$~DžfN0yIлsZ _T|ܥ6o46;0ӾQNrKeќpn1H@/Ғ햪%*jg ( )O\ev:qJ *{DJ7%&g2 Iś'd2;@qbEC@@󎼓U)qxorUxk?A2N &rYf: {@o-hѽ'7y3N5O@(FRS<04ģ@!g,H¸bHbޕ}Co\AƠ77,Be ۗ(f#T3wIh6rC^B} y>W +x}11T-\7" -t`}Y}f3Q%f;Dⲁ(>cmM|Z5l*\s&!Z|"\E#~HBTʜym͊ ţ,dg5ycGgՈ0!>eI! H 8`yiHIYG @M Ic#`- \R#] exs\I%E7ؠ'R,vW}l⾻I}CY-;;=gA,#ޏ-0=AjhE$:ʋ@"P;N>|"$g.=fx9<͵fmEظ΋7}kEg&?4+?Bfqrh ⇭LA}x+n|} J]"yg" x tF}vpGӶNӳPRB`r)gwGH_švϒ_Z.m`9xiTГ 'oFSiEg K5Kp)䅃OYi _oq3DJ8pZk"lvIeTBR c]۬z2SIۏJeeQVA\P'VA9_:VcLydIfR^{wC!yeR)\ @]]zPsu{F piDB9 &ws@Ɲ@ U&^=>:R"|rUO<pς+q'e+\>4]1*ZthAE5Qߌoȕd! %!FMAV Ls\3yl*xhY^h1I5k-/.Yij'tBeM4BBI^bǽW%;i:9y8`w%ZzOBus+ݴ~KўPGbQzLv7k o=Vĉ m:>3jvF?[ա0pRS="qqS~DO8NG(7fW%u2f@gN>PztS nP~on۪֬qχ:5?Ts: )ھ~V@2'[xKvDZ- .v?<IHuBν_; +y!gY'C&ZxipQ4r\)p](!6~QTBrKÌM;諏bG c(^ PgϪB?6&6a'~VQ%`Q<jU[a@umjY1`{݁.HR`-\䞢,n wR@Woݏw"q!Wտ?_=.^1hTnW7Дa?~Rms٘m#&Lf}Vⷄ#qkdǁa̕;6 1,y%+P5%"0?h 71Al Ruά鲤d}1MhxVi .\1EwqrCsO=bqVmcuPƎ/b>WpӁG%& 3[EDMٌH2e. ?|c9o˵ǦNU.TtR 0c7^mg;, Gz +T8Ke@ LlЃX~oQNJM_nK$y{ rdR⣞"e$ޔ&zW,vN aCݿ˧SAu Hϲ[4Sn0^eKASF\4 tF3LLVǼ rn$)= I-ڪ;>[iChw45F΄JNxO"+M9{wYDQ0s beq@+A 0@? 
L`4`dd95ru[lPC'Ah>/ =\C3=y2mb覌[>0L b ˼(lXaGϷ ݥ.>qE0tm2"4iHT9yQsARc#^p_:Lœ%Ճ8*S)(}Jв-Vx@dXs3~-!$90.=j$ 2^%[QlBwZago Kܜ5=!}=b T|݄<&V${9Gao"Mm1iRX'W-+3=HusOfIesuicTF0P 8.:mpp0lSH0X:1vb")AQܺ&aԥ~GÞHaz>>}z8ܘȮ]㧰ڃ{6O 28vۡtb*oAxBA'|IW=ČpNkx`41r':`@ QX22).LZc MP5 %,LQk$OvرX FP!qMng ?Ϡ~/$ᎽW#>< ji#^)iKw]Y^i#aPn/g 7uj@ªz#kh,C5=zE)Y)(YlU/j@OI/[L|W\=܍o*AW^YƵ߲~B[3WҙdoS*EO(Ki+<^R=W] !J d$pg<ى7NNOP18q'i{ɱq,thN9M>XMe!GqV@v-W噻^'Rd] _n=A1(vQ >.Qܾe-l!icj d:Rny=OmmM XBl4\wsß y׊> *OW0W{l2]`-H̏sm<̽,8#qOZ:MEC]z ]гdqz.kgY`>ډmnY62e;1 2N8ö 4 dƜ^ 9vV2V0Bj-0̥[yiӔBL 1kǢG vxI dNqfdVWg)Yw&b Z#@GyC^{e鼈4 is 4t H|$s+Q: 0>uEMp=4Kjfbw'5Rt( fPǡك7]b56y+.@j<:w3Oz]NFˆ6Á&-=a͠35u9Yk;ocqo Cpů1]_[Z'>ou֞.t17(n3ͧqZbm4D7\| v@[I6h +:]/YsN ZWy*D8D߫$2"kqXVA*EBl 1}_&2 W,WaޕaOB=@GE߄ItbXqX₤xn ڍ`9:귲y5?{l5rs# [>al5)a9W^pp[C7 e0!3FCFPaLqcWN]#ǐۈF<'Pq4g# Ws1ο_Pw)nN5ILeڴ[:g3rya+WNKpPM(|$8t,OR=_5őqZ6VYR }/GS^XB{z1DJQ]wC`Taj'rvݍiԝp-,"GHpUQ6W\u~J\gHgN:Pv’С.F`pnA\ C_b(y]s4h/tPgH#agBT ^&>Zc=[TPSJ}BsaiPݽeh$m 8%l9׶ @ ҾVn2M`)qU6,yC8OǡAЇ?ǰQ?)-ZGG ΦIR<2XU0Kx o%QH7k$8y"i&6[>}ʻԯ悧> 4W;Be3y%7⠏?\|0شtC_nilR3'Γd9*< -!iSd3g/BI]/8Egst{wxI oQh"'3+8;B@Q"%:bwx&=}ׄ )-A`u"ӕڼM9fUnYZN\+xF8[7A D%A] 7wב4Ù+UGVΑzj7R CERjX7R!mVBaǷ}Ĕ2A˾ 5)- *c͹iq#8gNBm"xpfEz6YZ!  
I婾7@j0ҡ;<%"?}"lTӛs@|hJnfxonᷢٮ7] wJ(J]#W P|GFؘU>_2r"B"W5My""O'yʉ}bBWxЩT`nUc7CpX39!1砒yiίo(%o )c(Ps#qFK^R3ilk\oA͵[#'$pq f} jɱB" zӛ"2)PmH8mLոRE @F;XD[lI\Tgw4J>BTUip[L yx)2Y2D C؅sϩqSf`/\Ծ_<}Pq2.r96Q.tR\eg>Hn `)jlht;DRS P=ڔ[)pR-ڞ+rK~$L;1;TD )tr@&2^%gll<,F}#~kk&&wgѣ죽:Yo; Έ?ݝڕ5y޺fdٜbu̒A"(_YװzQ/pG=hl2Z箱 @8dȫfpF|40PcV66L_@`m+ /_ őf{PkEDTތE4aXXƯEv&tS\Od{p{\;)%^l`7߂fPFW4>jr͊H'>FvPHx#W^'7%e\@Gv 3Hr@CnbW<2껪}5C]`9!S4wY,GA]1K,C?CP|)a} ɩwH\NseiD4E+/%;\e!/V^B;xI t`Ln@^ZB) fLԋyR yo-F/x*n|*jKF,] v)rgkXF(-`1u´U MkWz bZWGSe%TpEQ|R͡h=o-{vf i!шj$ ]JEH9@{_w>'iY7k*+!dl\xNc.(NvGmqXbIj8XTm>zeJN!)ęo:^mOe5{oV;39A͖QӶK$;'],k k8lno VJkݱZRzea)&cLO~#v)j هf gu(vU((*2DlGK, MQm._ɘI ҝʼQ+~B];,M*}EKĿCpdUBDS_|HeG;`X?ŭB pQblgmo'$‡#^EqE]02YK~ ˗BN/q8Y[AQa>xJl?3Lrֈ)աip:`4>0SKc|q~qx`A܄`,-3Ru[Pӱlq" % ^OcW3vs>EPnf:J=(bA`3csRg I|5]t" jExbd\(ՌS yskVJZFNzрRF <$PR!:L!T]R}mqJ,{zM?LcC)$Bb ȶ}1gLe  0j 懭$SȊp^Q+ eY? ZЬ/`9bE7ui4[(h3dazE1]<\:4np'cV0##[kV N„;Q8 )=.M'0X-[wC{jUq0[ c?o<<|(Su4OVxP3&iCOȖS6,۸2Cs=ԴǶyg"m7o j@9z>%bu2K[L+TҚPCG!D&oTEr>ܾ졉$Cn5]FO%H>k7B4io*Ko+djZg?|ѿ_o!s+Sk`z^zݮ4ˏ"? 42}5r+3"Gz_խh 17fl&/7P87_g#K4#B2։ qύ$%-h[T'HP%q=^gހʁ+UC' "*fvh5 /#4/ $i2e‚i7`]޳/E ڗ1eie_'Vf̏ լT;;JZ6"΍K~q,Rj@5*EnJxt,Z c0nf 3w.N ?I.sJ4_-`e)90r1x+%|/d8):0BXS*4"*0Ίh ؐ2GX?[Z[j2c%UyJP'Άo5ZN{ X)U#nKŭW縿V$6D6fg6th'ѥ? 
{LBEarp['nYCIEgVf-iPa"ˏO7O %*BX0ѰptK B"w`Qi6עyT\/;k*&ME03jN N*jë́ŒzhBCnO=S(g@ZA}3k~2z8\3b:ː>ڳjq S=C a΀ oo*e/`Q-Ff"8 M"mz1I+@.T(餌 9XtaDayqqǸ6QWKLU,/:6O& <)bF#{J73D:gA+ &xܴ$ឳ@>i-Y`)D<}":i4BXwpz t^QQ&C?N/=yxwZBaQo*oPdfO *#"< LZbNmZ0MYqy@\NPY (㱽 EI8.ro_`~jBP7 *CV^Tlp3mRUfn[.$)|j]}^Mod [(P +㚱]ІtϞ܉HDC[]F[nVGIIˢʠyYkD(Y2<|~M=Pl,ҷb\-L;I;|xf/:s3zփ98|p4UnXT흴Љ#L[: 3 'dMD-j*e[11?V~7U.>>׾bf.|y3\jc 3S˹lhNh@kh{!q?/v;F 9o3eR|C|7q0* +5ߎ7+=ŃV* +i]VAv+sޑ_% -Fzk>nxZϟͨ_U=FN2W~icV x^ 9N'y~ ?@=r!HD.{T_mANHY]GMXX[o+V~Z#I(D ˽8T'.ԙ{ SUhYs3+W`1ͭ.lci5[~]\SRBw DR"/-R 4-M;>I럖'^o6zЪLH*S?ĢINa8G /ȵL}( ν a{PBp4=E)\AA)lr;eD4 ):j(v@;Aٵ;ux}.P d }{/F/l6|R<"= ?TsAqϯYMed,7ʳ9?F&**2њp'LjիٲO4wМqK]4B.Kʠ9q;bM^9Bk18Eljs.!HTB\`A6r9G@S $!Ѣh=drC ZenUyDg{.P/$ySnP^˛!r/qAFX@dl-Ϋ깥1}]Gٮ^Up(LhW9iHYq_׸n)h5WIZf`Sl>bf Rl6)e|g  c@}^Q7$ѱcךU@El,XC%j,A[3$'5>wɉ#M4@mmPpyApjN2+M&KŒK7y+@Ϻ}">"JEv) (_$t: # ғ“?:P^_ŪdjγJwFj M):nE^,B){iH>*f=$h2!ܘmp&9{g 6x~s4ng\{2MɶUH=ga8'ãO)Wnl{$Xrx^'RH{'pq'),zeB i pL@r tV &Ե9Vë} ksh Vӫ_'N+" p_R'kcބmL_|NGx_H$J'JO?5V*b^;XliQZ&u_hvB\eo?jweH!"˶@C>I?$ |ku}@^тHj\/З|˅4|Dw~ qq~/dC7ROBWW̿m4Ah*= 5}'ʐd8pz؊FxgYIJ ᫖LjT4RVeo039!u+W>Eۙn9I͵c2 #(JD]Nb~j'RhtF_d.lnK 6;:҂m0,nB6Cф\ma#]E5%'GܮNmGhv ]ȶ'4zw-C׶&ÄYM,%?"U:hRy2x5h%,P>abe>UĀ;&sʜlHm鎢JȔbHr; ?y0Nbx:oXDGI2%z;(Dw6ރw XsD`Ќ,MeaUO{'W%&5 ɥ!9 qdw$LAvƳ6[&mF*L,O&<ղn(<isSy'; d8߾Ma( 3Si;Æps^^ cIy1)K&E_=?OJq.qAjѧc ^`/M W]C< 4{Vx]|YۛW$$:+ tkcfIPc? 
Vھ.L={aQ\Ts3E@V6ؒ+Ij O1 X>!UJ H@?D7 \jF*"1uv #@o#S+ETMms}=z9st+,ܺ,uwl*-fI:[ X@4৚Z[-$ة\d5Dhjsσj&R $svc8~joҹH-\Q>Q*qˉVE]h$^aN<%쏧׶s(v-b^(iKY9тG,{ :)U+,LE˅r кt[N[Aэs@B7˝93u4g7aD9)Lo\9 K*@utp\px=)uDV{|ºQ8~jTsACrnP'l(}Pj2t̑g{Z;G9ة1"tr *fBqEBG 5=%Hm~e?(`Ի95]STGQgY9}jX;^G'VaJɐġICVrCv#& Xʿo7)ߙhe]J)ɡE>D7#̊0Zԩ&V*:uelE˞R"_;(ah[ye=XAoEpT\ z^ v!֜;}D@I@-xMБ8ih\p78oڵC sAC*i"Ia=c)5X˰/CkBZyamvMbr?b~Z&(TdvTCɚ%hy7;QaY(nl}v ?m `>F+) !i-x^$kIXoI`E0K l.Q/ˋb-!6OTQvY-0 b9p/JMO0␝jAG^$u(9Ȍ0/d4Ƶ@ ΰ֡W@b2 zV$As.rg2syrI)#"fFd4a/ (lC\Frt}Bt 6A&ۯ?EK )0#hJ."\-&^Tt3dW1|՞,ET.;sb9.NS"Z Q\>Sqؽ bb:qSgLJ)\?(gIr.p0B3=sryqD$iEHH͐g[^Asp]ҡV$zabRx4GLX#8i I4<4XD"<4s9*(z*΢SX+$G-/Da's+2wZC"*1oMu>,tbEGEJhV} s7 z{trքȭPD g뿤WR_86/8$|AsbFY}m9I m(Hy K6!@opql @XkffN23 nm1)|)k}˗sZLo GX)hJXЄ|9f o^rw ˆ%H`>lNŅ>3jj龖!1h ECK(LN\-N.PX,`oew>\NvbwE5 v1N%VvlTJw6ڏBgYof!h#'GaI`MRy{֍O~Bć#@$P,0KJiJYXv\[칈g7\4Lj\b''R-<;'kDz)t7s]!Aΐ?LX=NC.@CkDh4i( I/-OChr)ԭ]$l"㳙tn$qV,_2ƷufF(oY`c٭\,LB_v  ]i`|>v+ԩ!ľF8*5`,sk\8>wO<9p}^e@$y#7Uډ6Qt99D*+zյ+2[RXǞg?!yuܙ0Tx.?%B RfXt[7>?պnn040`d:5{AJ(2lDC9%Nv0qkc"ɐ*{ dp"7/CBm.jUҖ9,lN@4bv]v:pQ(]щrxY/4!+] ~ذBN?O<x(wrJEd1w:3/W1 =)%jkO]^k]D]q=jppnŜ߭I3Lϭxʸzֆ^ Bs; ҋ=`k/%ilYwO2= 9#|3#(muRk;OV]o>^'t!/{Ppon9p@q5ѯ%>6)` wBMwgU_yfԡ8r@c[iHR4#^5_'cM+Uwb:Yx8.p^A_>FaNFR)/w'Njs,a_ Jo/+&B]|꧄+DU树"TW $3-[) ~+[sVA~(r}nսRe1C!6};av|B4; sĊ53ů&VP:^ ;l3?**ᵇf`?إo~ .|@T<͹Awp"NwĀ0mOYs/ Bܘ}V<昫jE]:F5*޹kV^gGȍ,E ~ GݨCgl|Q_D wZWSVqw"K a4'heLiOn*x/6y@Eu$}TneͰ6 q&>E*K9eѷKUr͙0_d$[$Ҟ9y-`YlOg;?=UGK"}%MOZbg1rjo|D6&fng[YPRZTO$Våql=xa׏pXr^eW/tVC"̦wդψ~l.E=\+\oT "U#Х<̂|݊P\$imScwUNLq`T Ȧ>l-d={]PI09B"o?_1MeR`O=@T9o, PqwdWa|Rnzi&UIQ! 
Qmcs=DLsI?._Բo vM+T>yTVN.> g]g3UHP|u NYaΈd&N]IEN*rt3]]WlFZ6xgluR_ϥޜqc5!}!KE (\r e !9{gl~ 4#/qr^ BE9n'3Ђ~O49J1NLbĐlcj^$81D`*(]+421rp;Z\+1-!.MS@]=Ʒ)IBZ])c܌αM㈣Iȣ$g@8Y*FT >kDBY}q@lNuƊ=2zX52(z:W6wHGDvUE0?bsY9x&=firs Ej ~ ;QޚMx]\*V< {ՏONF0TD *$3'qn"YH| __bwEhH~8Y0דY||| }H}KѮjph^b C4ºyG{zX-#f;Ы S;ꉵˠ2WB mgJ-h0u6#V-]l&Gf}xM#H֢%I }YhXEI0D'7{q OT6^i٬qO+n\ Ji ȊeFSgȭ9c]1L\*g~: $=#Jn-$0<THAzr֦aمWMy.W@\y3brP^g^g/bv&U`)3]ؒCAf2ĺ(L{‡tEM{"WȗcFT:8>Z҅˃ 0& ">=t M-hޠb> -`BA%u/M;x[zmMV#ӝzWZ{Ҡtƒ!wr, sP UaIezE*w" rV $jDԯxiAyM'\`r;!x2=^g@`A6/CfU$ʾE{n q跧%A+`t*wՁ>NٝXR iOuwhqf(,ߟ{Ь0B&mzm4|E>nB:U0n4)U D˶hL0,|\*c3)|aj?a/#}$j)T}Q]Aj$&jf.4p;fβ?@P(3C_u$[jD6P-7jU\84U罂OTwcRR!kRSP B%|3A8,7W`l8 ٮ Gb;2#(i Fg$;( `DctPH8]VC>ԥ ʇ>^!ch~ x(%FC/+JMמKCxF @8'>NaC<@Ni31:H/`ND5:$ԙKsie(Q1[,lٙ+Z&X*~3{^J饟n}ٴyW-$EXYnmyYa$&4P D0`Na>T:EԾ,Z"ڰ!^Tk3qpe $>_]/ړ,S<>3TWn㥕B: Km)UnZyeX%Ȟ 4=!G٨ez*ٺnJ]8S%cnN8؎JjVӁf  _P}+EUpw b/`$NjgЇ+JRA5zONLٿXfu蟡Qzd vy*2/>tq,?2kfwZ9=$UHݭ!OO^We@4~ZR$`fVp:5W3TZդ"EYY?BLpBí%i/'/CW*2mg')FN#T%To+ZC :eIӎ{S9PéQP Tj 1Wx!Vb^ߒ7y bYK,@-]KL :1V\&oYߋhb#OX s3yaZ~,B ,bl1sguI \"y`MedP0:{,.U"]zz4 9:xF%Tkm}QX.SQJLxψr7c'Oa$L>毧:,5_3dtϭA Hȵt-05M(ȺxX'<_ل&ʶLNz!1{F61հZGK8rI֠QWOqG s Cns;JORl,}|`u\!I[55\Z0 g/ʋF  ֿR~cQ3%UڈYFS ^2@7Y[S؁C0ydxp]0$hźcB6:ξRJ_f+I1{P9v H"h?Ɠꚓ#2 %m6Ūb :ݮ@|dӜ܂',i[d 9)H0:tGUi2yC{ٗpZo9w}af?VSU-h(s֘ =2,փe W8}ʃv*YdXFI-nO]AU!4c,\lރa44Nk }+:k&Mk(s}UyK,&QN|ʤKy_,N=N\Da(ofQ̿rX*PJl{AP!kԗeZq9x0;lWڀowqTUW+ $E, Uldm_i+#H'}}9I; J8Cu<.BT[_EUҊLvfk.dx5sM#iIpEvr/Mõؽ"=${Dr[.I0"bNҎ B#yO`7w+j=kb=-"12dR-Zc;Y ꎡz^}Qi  Gm"[H2]&I\޻t/?cYX $<C'S)W>eJ>-'*IPs ^rH/Tɽ!ZQJ u3K"e^t)]3oGǿ0 T]3n$+K!b*O&TJ$_@opϢYӂ9 3G؅f/ڽ Kl.А=` 6o %e4%$ ~_D@J易Ŵͅ'vUqZw,;CGVϰU#}gv٦ߠ-NZV*q<v7"&َJ-QkfR2sqU qJ*w" { TrJEO½NԒQ3YEy6Or?TPDO&(u.>5m8(b B⮾EӽUK?3t'JFlH_K§/V79ڻibT0":n&2"AϽ6vQ]֎=Vfj?/_K=hS>t.@עXH^NL+U|0D;x:q' 'igARI])d%ZdG_wt9J㯮A tyoQQ/N s=lUrIHl ]a.Utqpi~}&dXRzq_<XL7P|=ܜgM-~}sb H/Xxٔ`a)x҃0R˵ك!5AɒeJ-&,E)T{*!$ 6y t=0XbA\x^i1&E@!?@w GL`g҉䲼j(tx_0/ItŭZ2F4;ĿM jFn4; Zec2{nduebc[6jOH/imnƒ>' 79vȬ8"$jzIvDy63rn{p ǁZN΄j v5v\bt. 
g!bΘ#-#kJd.Ju ݄KߴFtΈ (}O Tf`IO Ui־'O&Xfֿm? ߍBΧ`D1 tG. fV Sgg]6 `CM ]Njʼ&W~ i\<̹#=GRTa|W37Y#4G( 8f-D/=Ӆ%="o5}j3"#(O5*[§Q_uҟň14y6{*fh;}">:KdnI6:b\c3 ~6nChs_ata Q-VDМCYh(c.nL @[r*NWd,L6ж.d4biM K}ntcG2 p%qx P`no2ō=PtXJIؚ=(:7 l#,cm@,#!UP[ LހEL{1*?$ MF-/1#:I!i9Q SVZٳD|E;Y&S%e;' nl &+KH}}sNJק(?\ pC3@E Ufj0ݹnķ(K{TۋaһP9W512pmFSIȄF*k|hbu αrF4aO3R*rACN"lЍ!D^SK6M{3>p=Ḡ2Ǿ k%DЂѤjwUӶfǂJ̇HؤZpyK?WoetU3^`}h"j71 !\s@އu @谡1blq!mI3U|KiIB?k]\cmvbs_םxl}c;”@2ƻ=l-J|vV RUl鶾vGQ8rBcxp(;iq||ZƇ-(o>@Uwv!{ʀĒ;-C=EqYg^sw1r*q{7_R}[S!Xx~*_hq!M0Q"\|rv R 8wA\Wp10pNdf8 :E mPF8J&%x7D͎'~Lpe=ح`媞ptAnsHύ.GGhEQ+FJ0JP ص'ڸRƥTL҇Λ/ ݝp?qi"z#7}I!yCR 눆XoYӯru߭0n`vi/uR(ѱ=_67i;@ 諑ds 6ACNe:˽mPT?t#H@U*`MȟWyZ\=F.`+3? >|cXLZ_.1aMԼW؞T%mj 0# aRd:g QA#6f-BzF5kQ]>6ikFpB$< ~/ ifkB!ֲ$\ ~PsK)&geP"-7ZHa-gB@u`4i_lNd>d+C%EjVO8._odO<{5OoXt$-2u29wHC9w(!7,y n)S\7DFi 7Kir\7t2Y{zָnr$up(@U|@G:_m*C-+2x@W 8yD βYƒHCR3;W$m)#nm,-ʂ(嗽v;IE22$9DyƐT䡗4 FcW F e khSn|z2 r<ǾAw_%A[I; UؚуS^`AwV2˞ _qN ݐHIǥ.F_uUVi`~,Lvg 'R OztAϪ&^REzP S3]lGaAeM ^/4%!M,#sZ9Ub0'Y\v'8`XlN2䚧~OxU6Vf/XLqfqPB/G=5$ô{v]xftl=/㫍.@sZٙ!و=R' 5x)## zד!\{SL Ӵ1dB$okb9v0j/d iIJIŚ4Y\ʟ7)"t}⺋ 7^Oۇ--S?m g۵xkrzU1g5ѳ@8Hh܎^l,§f5rOq:E&m|@t `P܂po,ӥHUH^r1%`sliɂge1稆fSІ(jqYpG+I6أ /؊2t'Ble&Bx!OّH!l,o% vt(2@‹I̥BСrOfUu4yU`5 3Jlw>gTAݯtތD$lxR.┭Rw[L׶7?1bPh1WUVu{@yMƽVh"uUGN}dGp!C9VH!{/TRwg腀 )Dmx" ]ַuKk_3NN[X0v0ZR\#e$@!eTCԁy-E[Ce(B &0|os6] xʭKWuNjW&R ݾ6u!F?bRg\BB4g7@]EM̏O9JM;PB5rE)ȗ皭э<ݻ0DJ0ѺO| )mnws.,}\RFBͿUEӘ~9  Ғ )]|ZxZ9F)3]mHy{5ꬔ/L,^nz˟Hcd:;կ@̡o#GţN>2~%y 0A FeC>L}[:^DeFU&G[o= bOP:Tq>*cLf核NᠨJ|i~!"=P".WR8Ӿdaseqn)*eB/-MI/_[JvZrӿ*7yܶSi; jmRBiq<FQ:^%bΨ$ξX:*!āx[竴I'H.tYJ?3bhiT}U$lH KͶcNKa/&1M6wlm}lit;ʅ@L*,edgY(),&4#hJ}%N)w?ݥS&1,гۨڢA%< }"E~Tшx*6-өDŽ2vb`zI<z0VC@<ߐAP͚(&5ywxS Digq@> "P}.Ucv@)'"UJۅm5a< !\Q,$L:i>'::V M'ڪv[~{" #"y]q/`Ή)dt)٨q)OeŸ:zRB1JJRUf<_|uWc-G}p~) v_oqwBg|96+eR:$Շ'B !^O μԿ' Rg&ӫEIxt3m0#c8&(6 (N#n(UǨ߇'6!|TU*C=u(u1gWZ,C`y[S1 -Jn 'yf+ ;Aw MPmt?a|ikL:,:@ko:ʳ[+͓$?Y,IEW* ½Y ~ [=lOr8t!P+0+ۧlH;kE h0دBKDIt NJW!5MH0.@0LvrPC9g8-=C#j0"?siFUG85@lX~5 ZuT \s:0gxa$ʤmR@ǞG=)h@!u1e?5G,B†9m4Je(#,DjTUnSgqHXA!/22 
sA/qKi1~,ΙQ feiHHWbZc^qfuIjUq3@E֢YE,Po<"iM5ڜ~Hy% 3͢WE=[pK$nU)XO0Tn(}$^XO<QAe ? 8* ʓ0ju`q$R[b#Uk5[r:ߒBPe(6˸UKlH_S&`NV[nQDvRݖ03d 1/s&F PmD S&C8rg)a wWRGc.9, w1f[zu8kqH`eߺiVABogGluV:M m3 R@(VϺ )՞aבo>yQi =K,/p.apzԁH,7`8910$Jp13+3F~*) xnހOY3f!w͘!1f03/$LveP&.[{HjIb 笶q5Ha8: v(CA]21_g7 HmO{l"UkuBI) =KM"aTk$ 2Ww=~ b?V:֝~[,`E6"%UIQ&̂̇1)3P ;7ͽlF7yCx4텺FGfSS[^4' Β[*9 ܺ{Z5m]+l]EQTAmiSc9pGzTv?Ȕ jݵx k` \EP1glc׌<%2A GHQK L?P _k}4As%Ō5 v9.ܧaFTSG2Na KC`=14v^2iE*ć1vQ5|۬K^)CYaAq|w4\;p@Ϊf<N5VB] cU])c9jFHQ\+4OM͟Mslrzwb#x6_Va$p"?HY ap%EyenjAo0x+jR{o.+û JVw;}AH>J6͉*g&" cb22ʴ.봖3d =_;NaioX6'u%ϣdb.R(rJFn}ID)%3IOq]<9 )o\C=α:I +bҘR:joC1ϋIe~#}H-T;f4ܑ|=$koV`N):f !#+A乣C)AIfsS| ɎI9-'bmr%o] /,F~rT#1ȯ|V&UfHWH huHSE nm5_~7:!u8wURT((/"c&Ȭz{>˦f QrAfખ ~BZ kwn[|ΟJ+d*y$|kN6p}SRdſzٸŌmpxHyAm~i1G\.[MQ>nsM$գyK WgiNH)m`@(lg|JP}YpxXZ?k@I^72FZ 0)fCthCBe kEUe{KDl{LjJϪ{c1ؓt 4/…\-mdSɰ+Um I{%@]NNS9 8`49sp/,@Kp'[ZcthNîI-^Rm,͉*(D,uD)_~1N.y-p00kAM2`(f eV6@jsȚ42Zu0<[@52JES"j| m,;,&eزUضԌ:>=KהgD"4p ᩊ->Y_x9W[xmǷ)cz_C~.neR5/G+ 1BD ex>؝+oߨ5-uW8pmuyW~ P09 гKýzI.QN װeit@ѐv|`yCVF=S65Z{(0m/7=O ;k|qt_+ w+r=V"@J<겠:*Bؚ%{H8" *O yu qVG3ͮ1ifLh =tAToZ|4'ZZPu͝w)ٱF[>VubNZ$k &^jƘpJenMk`2o|}InO Kuʱ4:^LkbNyhdJ%`]׋_7F}"HjF1r%YS'rXߚm5D&3PG(ebW*Rv$D:g /:==!C=sʰ?-pُsף#,eǁTENVojLˌ4%DesIg3YTH%Yyfc%` lԑDz?G\P=|3hmnT%cPF0uHrq/_$G{ tҀR;yh. EX4;.ȇ/[jv8mQ5a|(́>55Y3xC#ܧd{k_\վP;EDRop~eԈ8W[X;;*ޗ,Vx0$-.LQލxkh$QԲ/}%_oVmyI}F zK-S`<57:_g %]YɧSr'S(ƱN7㭶ybEmzlCD'׺.bM2/ ڱ5tG-X*d#K^ZGgThO;HH=D=`L+ wmqi28Ak_RLhBz^d%J@, ~253T7h[1?iqg3{qi ˋ;߅d'Uo'U9jz6Ԇ}DZ;>(;ńAB@nro6z[F5 7D 7lO$m|bps<'QL'eKRU®U$@r.dOB D"ֳqLf!ubu-+yj]gfD#d|(O< X@1VXNY;O$t)3|}ci98FVwq!_聃ܱk @W$#GFh0dk! TzO umƾU۳X\T""rȔjk[239rfC]2^?L%9~H!:!TyhŇ̥5{xby5ګp|k%/vVP&B:xw!#/peϥ@u<⯐I=Ls9ˡCJ'zv2FK+-ڣ_)3-#F ńX[e]j.עw65S P$?k1Jk@;@\ƀՃUl=3ݨ7^Uq_cV,2q8v?8r%5#_o#X롥Ё~O+߹y8x ~a8֥^fjA0^3GǶ:*# $FK= ~AELzss!dVCgw5JRl@Uz(Q0 x#v%໩Y&#˼v[D.dnS5uSǫsdoq+ b!wp By27_m~2c`%G!fRNDZj"$dfȰ7jHPb%W.?wQ:R"yVVil( ul޻fW7T0ΛxF{@a=& `NQ='QFky3ëDd8lC%.GDEya\5`W+ucEH $U. 
00YDP, /0,] Y-E@!xz; =Dz "|Q٣:vh31L?gg⭒Pn4S7*ߚFxupҲ5(Pæ!eڤ~Əd^ɸD0Powx^b H'Ӈ0&e0?σhi fdv=H˛?K$y WkQ:C̞4 Ѳ]H-uYIY{6 0 \[Is"TSf1br1]H򏼈GT_AOoA %em1c fBu7L֙ZswfPi!J| :ڊuQ:%`13T4zt!t ,|L7l'N1NėUFTٱx^[pcR3dR= ?Fó)#@Jo濼>x񴺼m!RǬj! B6 :fh%6_{M6%d/ʴ'7nFwQ:̒z=e-W̰zk})١GP]_,tW=,sJJj;~x'"Rg}C :7K¿?%}wٱRYpNd0DP,dЯ:| 6x?ki%.BTfcHHV/zRA?{uĒogx晵/E(7:őiCŜbd'\9.'ڭH2lP Y֍":M@~zq`Qu;UEeaEsyj`F{6X;֖ @d qyֈ$X])*5S]#3 ?t$.ƕ|I(VʣFa"HxbLY; !sD(U^G-5(hDh65ɘ5RNu?Iւ,V1ďʺc{)`Dް3^2Bc|nY摍HG[5DCfRqKQyȞ[7kEbQ8/E }H*o>Ks=#]@kVNQ~,]I (:Fe͹:rS3RE12SI1a7}\ (I,f]dܮa\! 9yb!=!Qa[5ޘNt7+E,L+*"5X}TL9}34/ウZV N n \ȧlUr|$iu3LhRצTg-puZnQ$d!".gd I5[9Ue\R8L6NY~Eפ#ƓAHi6~Յi#b`[R `q̿Mp&$K]|B) Fvq_qБ.foD&PȀlnD# >*s{~|bg#;`H8EbИDyV Y:DB4(aI.uyf̘| bSaNd|OirCDT fG43q;䖾4G+vyWڂ%d]; Q*3k]RJB" H@DG܉X>\.hC$j;EY J݊(gt!8ҭ=h2b-W.v*$H@@z,eN @\z.A"]>՛ Lz(Br|NEl*gO'lh$Г oq/O7+ޅC;H!:2qP.WLw|'guuZ`,QR9G!;OYZky؊뉰ƈ +N D/9zv-R xKJꅯKo ?ܪ6й%l ~J{5zVt)#sFSG܀WFm$^T,mQb;(`C#6qx T U[-=ECnAUfOD,̈OZi>&*tgv]kUzԶqyۑ;\THBhOpRU @IWh.<2)KF >yVI "s[ 8dA3.AJ_ L7k> \k yaB9%t*\a"\&dm)YZr@C#y49c4dcPk6޿t$⦝]0ƣXg3C`k}\fBhr%jvqow!V}Akغ"1}ǐ3WwƂxTB_ >~^B={x(vޚX;B}zҖn :)ȏnٍ)1oV4 ]Y+B23o*M"+i |=}5N J)10htО4)raltH ?RЫǁ[$lNC-XD:1\uпb8Iid ܗE RjgW\ ~oitA]~K^#m\ՠwWY<MjW,23}>;2ϽhT\_om2lN($b lΟ>46=Kh.MvNޔ?;u>\A+{5]y4 !Q_ Oζ AGfHKSgVG( .ADmdeu;׮|Oz]SЬcl&~gl4t!Ak7?]R< 76\j@/9u#N) paʁd!dI@4}#񭡿|eQîcep)Y0.@Xzb)y_{/})1isHvѷxս<;0D0"DLfS_x1~*2nyZ.'ϙOV<vYl:}QI3ݘ=v%uýeNalab)PJ9J ;e\SW*r)RWtF/LE9[mBQ<11;?b< ًbV>./5h&8]oʇP0/{~oi) 4T"_̒-ϐUt юh].|8î "9B}RdTt .O1 ?[`'j~M"kdyJQv 2fcs? 6X\/Ƴ%3a,?=g =' =Wi#@.`}׶O9:_QkRw%<2;Od[䣹x3=?yWnGH\`@ɛm5c$❣v-µOV,ĹOa(u bNO,&c݉Dǽ ^U$0N792mMmuޛJ3Hi'y. nhnOx֭7ͶFFBGf P+ AJ"PDi}bP&_#wF2d߼@_!xtiiwI7,yP6~^GhWR(% ;N| EzAQ226ӡHL[|WE =o1Y>u X\,fka:,+uvІi.qjS~hڀGWzOj λ$-exTҩg/HQ_5v=CR72o 7vX[xi򎿹vO?s v/Pkmu 'Gĥ;w8Yȿk3`[&FY,UaN$Ǖ &29,`@p.U塃o[*@=_>򗺄ws ]jCH#u"i#x=q*}IKgw;m-Ɂʘ{iX?־6~p2`k|"^MD[rmMG):3$ꈦQjcS_uSB˞JdFx/ 3S q7OS)$ Cߣ -otKTnaҏӹc͚EB. 
-˿on˪2B<`;eE?*€y}+@:!-E)0WG\T>%O6smƇexqD60Đlcxd6bj%, MXaP+X͋Iغ<λ_hLACPFc O >r@8}B{H\D/x!&ɓ_߁6ɗZokBpf!v9v޾h+zѹ; 7 }F2h"օ)k\jo ?"W=X$c$ ӛ!^µ0_F/ E^ uwu@^F9+s_I" LgؚΚ9 Np1oud<=PZpLˎ' z7i)g*} !~|x%g Mͯ7yL @Ә[3c.ö!g'3Y*lŦ0Ĥd"P7Zj$ j~mDlFv4F[ &-Vw44a# RuŁ8>~l/qҁi9ڱ<4,F:*'wS~>3kT_!M6A$Y wZT㱸)$r}U =«1'|{|fi#u?].h WiBߗFJ]?u<,,.6ih:YΈuy߻ak)pw\q7}'!lu.hlYږ L8~eX\PZYjҿ}\9IoF=eJ;GT'Y#DuE$yߚrUrbEFd5ԉȈAg1q%Hb7S{9?ɜ3@kH,ok`F1Vs1!dۺkh5i<LOoMS@~4(L2$)E N:ɯfĐ>Rf ^HB QvdWJ{sZHګ;8q7UҀT*je9fedȟTUuٳނ9f @.;Jo` RyƂU-;*C9_%G<(g?)Ug ԖQqy@DDX.$#a=xEih鶝\tbgD Wg7m`/Df<)xB[DcwA̖h=t#o!4 ~ 4i~i@FcAq|@Ͷ:eS뛎 wl|89aPwXP.d+)^mz2#w#n+dn,zfcVޤ\!7jP=7p㚏o;̿f#$kdz6+B, +#j4Gî7/} )B:PHJ2TU_ aHι߲C2G!3.}p!)K &F!qW5v3U֕T’W oj=wolGQxd{Z*y1n}nYngw^[8~8X`j 42߰Qxjq.( 24"nu|}i'O| `@ g2k#ԝ|@V[ VI<  x`8Ajq\ހI}VߏУш(\;U ("D_{t_^53۰^ԙCY}H(ԥ%2.!PnMS_PГSrN WPBUA#Ev[Nj,3Kit}F"Ix;,1tLbƨ`1#~BxjNXef}scq{{Z۫f_h]%D+.XpH7-.3m卾*@]$5MSqppyTRhIv ¢{N7BqlB[h0G.[9:k*{^e n\e& zxSTct64Dt2802IQS[D yaFrхB;yHR7JpV/oBWrEE-8l%0ho;аfx+4+ 2 ajDzBx^RcxqC6 z(jA bOfKY *lRϪA\]ϣsXp [00LTu2 LmhMCRj!nkz>mSAlH:rw0Z1)54 ibP3Nq=:M->߀i{v#?^?g(^K2N?8$< jOR@+𱱟Onsv ,۴JK} mk[t,~p*u: ׼m.dYf2q?d'}L)*>pT8/ʏ{&z$vf 6Ιއ &U6|f:iXel~hx>@R!JG&H!jÏ(_)0* ]d-̒ful  Sy"x`(;긝"܊D4"j 1oKw xPB:Wn(s 8p=sp+YІ.Ba\ 7,arl8Wh GigBP@5 Y9 a[DEݶF}4bI1*#B`aG+;2lU:Qw)5Qk^0Dwt]yج^+?Buy{>9虼ڥf]w 4sJ^ o`|7 |$Zb >[_]eW-puHb`0 ⃰ ~cOXROf.`\a W xV.p~qRPq=`㛞)-@'NS}OMFP=ZAQ v(]*LG?4CO5:0^8"TBQOcRAOi3ڄ IեW%XgUJȸM0\'PWe]Pn 9,]Ȝz`Y?07hF0[ԏ{Ƿ@G AZ4V߿cuAyoۉo>!FZv6Pg+Lv hN ĸ*c Isb~¶]q X҆@ P+ᎊdΉe-NkV.A{@Z;󜬓l>8vLsr>OV| tڳxt=@Q ZJ~٪[*p\|Л΂=Ma(_O y;[@/03|H?nD*2Md!g'~o fm!L9zm%b Xz-'ܛsW<3A՝\{x@^M  *s`,?w"H6NM% /; EK.Elq#)!$:\LNH?U6'K" Z}`y/Zʫ:–{o]5j9`Ϻ/U)[OYv<44x:'Rs/1_U(D u{B}bS`nr\g]d/7g`]` l7[R4 :?u7_,-˃ (\'UuE〼wDY* ?QG~>qQ`NôiD==@ug!UDa՛i/Ngsy_k~ H} c츔X}$CIIG pɎQ{hg2W7 N_x(R0*a%' -j5ona;}+l9,0#>~I".d冂Sbg$}_=(y(&R)սʵ1rD+@~S2r.T׶GW:m=L$-CƏEG)șpEWs3Y E689Oxص۹9t6RPpHx¨(Pww=>$Fo1Y_q `R@ZIVuր/! 
b0 2a" fցfI=;EiÍTb-: {nt˵ jW궃@LWp}8 PC2C?)|~ ] `"c}pa{٦'7] IS(` cVG זYe&2Ly ~Ȏݩ%\1L`Շ!nhkM#.SE Ʈӛi r}1ͨwV)>|PO:!6Eaje^phY7@i~!~9q#?zac$r*ɰ*?wE95%酖u 'SPuf/z^]!W3U6<)8( պ(?=A^y>"L)iJjD]n3:΍6A<`U}a ;&@m_k2L#߳@OhL,xYBMmA'HQVrs:J%A11_5[bnzp !׶<$Ci*`F;:ٞކ薭C73h !:3 !߳ߔJ%c\!7؅|Y"A_%'V ۮ5VKIŒ4jk|,)`_ }VAdMy7R-+N2j`/lR& ߐa֥X4TsomP۔9agS2\Eti 0=BP`u+ȃXwPx3&@k1^dᚸ᭽n;lB'N)ly;0  ކz*%F,N`}(XW,߸aKZxxDC¦DX%QHq`nD‰k )g^rhօi`/cGeZ7c$ɉߥ4[-Uc:Ҹ:p:.2Cc ,㚴2p-3v@J1lI}}uBWTpl5xm#w.A!7)nA(3Ļ;XalIA@ !}`g;q^ sRk&553QIKjY}֥'ӣHJ=tettR4X.o(? }/~ϪH7 Ű|?Ys߱Jz\Ԯ/'LGMQ8~8-|n49x^zg+YL6}_?ƢA*T/ X˿iKB<'JKН7[[yF( >3?{="?p; &] ZÚc(Z 4w0m\m@*/dcYSoor26SQvHc}2/Tu^<WO+yOa"L@m{1EDSh7`-0y 4 F]B&WT㠢gYe9h%CD]z?7C@t=!Jy O^qϠވn:t4:e3η@mSylZ.T Aa?ZQ%Rcag+{~7CMƃQdFw3d4/ # cky ]7I(HZ6nT=M|6^"%_ɞBkYTF%N_j4ֶX SyP{j6Wrk"@ژQ9Ewu!cTm @**.+)sʋ .&pZ]1G't#x;Z%8Y>P; =KZeT`-;/+u{l  ? yS]*F#T#hT(?= ;/nb=PB.ⴤS:*d#x&H!p:4AtN_EMSEi}q+ ftl =;5ɔ <H0A{p+]ng9nUDŽRW0(o =1zLH2C(gg+=4D?U~ V^g{rΚzA8t@B62H} ]Qi))@o9-e_-\C; f~L!ïWnMQVbvLB/Xel3SUF)Ʊ`\Q_6.#t#Rw5Ok[`>;b:sIMɦKn`{* % Jw#7BlTo`6s>MW{X n9_ Js$ŜWR^ iHZ+|1}e2jB44Շ g5ײ9G[X["3h L[![ ޛ "7mɢmlZ\8&?D!^,dնp8e+-ߗKɵo؟ՒYL{Ս}pѹB"DڙV=tg;:I{$T̓5}7 *cp: Ƀ (5dg;AI !qZ}'o^,Yru5I\y۾?xЅ (& pB& b4yxK~޶_~KV,﮶ E;ꖬM## 68گ3_"O;\BH<7y0e*J9NçkKznrF!ڇ)0@@ᅵ~-%ݍ )h,VS C~]ɤI@ԯl-|L!-Zh▃I:'Ph#G%جBhTE p$}ʀ2ı):^ԶvҨJC50TMPo z6;gtVez!Ʈ6 XrR$\wS9Hd  ېR^BتN/YƒGn τ\%t·hڤDZK(B'V'o ^csfՕSs.Q+r4C*^Zs9KvLq;:5Qr!*T}pngqV+}NiTX8aKĿەhZ;^ PG0A/ʯu$QFaJЫ>NÈ` j }&)pY],DOZ:U !!)Bx>bGww631dQǪ*|ʟTjLr=s_tr9Hyp>"jݔsWnɋ .3M|x#+$GW@s_곀, Rp[2V@suҲRwl⁲JMsHg2UUg|6za+mA)ftRE3?Ӂ9{v = ؀>jqC@,yv`aI]qo4 .ٴK4>lE 8LFe^Tc6k zE'䫗:-`bfJ.N+=tQgfV-E\oF;/"W,9j."֎N0Z5wGDlI.ˋtbd,AHh=u)"TEw399 zn=_Bt4j,t&pc,O3亖GJT#SbwQpÓ6%(v .D 8&(pyFM/4ֵՆIjE#{M G+ZNJMm@yֶ%! 5B&o963F^7ۿ~zVw|."|[(k>]\Fީ0g-_#%%zftvŽXvF1Ɍ\"ӌJJ۠P'cgdn’" _VL rDpr,w[TE,Z^"P j*Ng^' Q29糓bbcCGM||!v2<&Yeo f:K6Q$RUYֲk[F\1YD N=8.< Hi1~~#GY"[}sem5߂X3-`'%Vumx \VǨQHwt]g) `tKS"9M&J 'uT#d uþ1Ї*0]np"*.taC{ !Zl& D>KFhKM0B7ľHűӮtZ!1ab%f6l$D3'zK"]s9 _g? 
J(m=8Zr~w³Դ.bʕp'.G-_\ 0D UR3MJ[p fJ˻iQ dP_i fm cӠ.S h-8|Q/y8DIQ*WnbF0M@XMi"{_H?8d<;~+YGEn2b`'z^1<.me֊ZF2߾^yw*/?Oo4Iz1&~iċET8lɆEu o@g:we\$m.|*cxE f*36e ]R=G*#4k ةnV,/h${`S}#ڮ"DJLhDIrsSt}ZFD%ڣ n3R "N뾐v,}Q_:Y&"N`&U7Ӭ{(&sӉPN4"Dg0+ޏz|yUW\H,nx䢊$'zG0CaaX52us=z/9Fl 3<5\X't+'O5d+w"A 4jcԡҚ`Ѕ,RKs^mi 1ؙqr{x=Zu?eAc V;$ q0-7U뵨?8eG# ~Zd5-!&ޔ2>}] 76{ w$(0p ?esbD}9үĬ }xZ=rl{lX\o"b'̉M21"$X2bӻzXDh9]iu~#.+}R Vm]@Z@@T,-`r%iG3鏑&f ;/,X:b8+7|6{` }9#-5[?n|YD&giPo|5,ٷe]%&mb ~}-ϨES$ؿ+p涊әx/XCmg-]R[}-:72+E` C^>U|kyH TΓgjGu9,FP' l >b=1dc1rW OOrR2,8q&ZxzG>xoK!CQ gA ~Yi;52:lh=tl WQvbQ ;j]P5m Sݺ!.! H:e!EGGxU6ujb>0K[US3cAzvKafTd/(bNUi g-g@枼ᆓ;Ԥ(U}]wÌɍ#g  žF99URD=fWs35R}L6_>YWta+wKGA?#N":N0O;m) aH[ӭO d$!D`t-ލϞBt ."}rAfץYX"f;rR8[SK\k9{}fd!|7}%ZVfv0UOy-EIsb-Xo^e 2n'|&` 0o.c=5.QK!S / MogQ;49akt%dޑ2ԟ \aʊ8+)ED8#sKEȍlmˬ?F s}5( /q2  IW>sȭTnB.ȶĹ*%FR7b"pЬҫ]Nnqn|qDH.`9Jf [9`ʤB,UXLo.dh.kQ.DYlcn+"bv9tXbo <'YӼX:LTѢ#g-ؐbTM}#B9^ōk}/.h*Fjcr`nFsgKU79[(If?{>(jBMZ]g]5|f3 ݬa[m? trRxV5 I萑ȟ)Is-wE`)'7(1n}I9iuxBT.*Rhñd΃g\G;x8õ}%Ms60vclZg]EvEB8K%zs$<3:*{J;}'%5dԛC6ebЯUbMzK2z!Jz)4|Ħx;\@U4' .,cV*amߜY;H4s<&iӅ ٙrvwf顿#lICQ$,WYjUE [F$ ;K29j2b_,ze_|ߍ. 
7riXrJ':|I/gtG?I۾+2))T& *mG\+z &||%fH0 )O}9{BOݝILAz>պ>`@ޖuAU7i jthUdɢ`Fn#9VC5Nš;s44K U9͉n#M/S=+j>8q"_,RZ_C]+KoB0^zUzCTNmTeO}on:"/<~*|M8BI/{e~,SWV_$WfԠ {v[_ ޳`,^膸gwTL%_*c!~ K%_=(Ən94TbXIgL _zO}D6B{ۡ{yz Oi_nɩQftN;qäd37!o;,~plBX-SZ}LB*Nh.6j,Z6qE->M 9X'_9|*64Y&~6eסVϝo(Np#>2.Hs}ZnN,~ EC3V'mϨˎZ؛)ﻃ| AZꋺ<[(IAa6!b\OX>T.ZæK+|UDǶƩ Um~|yeug`mYv@n91RÙŹJ*%@;*jp〧wnpViDwD>7U}be Ҷs1q['z7AT2oN$3ao\n+vlh"F>!jK#UA#Tenh{HI&p%s=JW_9ALƽV1G@ƣ+ۇ$X&2nX+ 8@yCpܟJwK,EGxWDP{-`krld[5,Ȥ)6V77xG> &M܁8>mQ cbq/%K XF(R |j8Zqo:$$fpek_.s "zǴ߄vmN줹ó_4C9 uTB:r >1>ߜ>6 B'<;KAe[no҅LJ!;;FIQb‡D% Ot@ӲxEF,pbCoc-Hv"^Q*Yvo\|Sܺ(4N(͔CAӤ ƨ3JyPpKz#ḷm4}"tZՁBP93,ﮟYiahie%2l!V\  mNW^I0-x _ew0 lMϏU86o/ju9*uĹH.u+ vi5;mG9pלN~|.Өm7i!@#Q/˹)잾އQ= H&gf[ CcYp /KˮDOeI#^;i"[F;o@ zxė'eHB!ҳ kqV:ns,(O `V?(<AW7wo'g,ҥte &ع~leEʂڶR"%QQoƧwn&_+ص1%:uJjZ: y}v[~ۗ(3:K3]b(xkK0-%X1+)6M">wR·{ϔ;BAl- Sn Ms ;ͅPɯ߇#ʽT~j(MDH/ub^J4zjD~c;X.$/"š5]3=EKNH"=ŀU[6;kn#Xa1"}LȂnl=aR=SX5a|w1m^3}UGPdǫoAhhzR5_o]4$H`{F@VV<E螮슠"{_赐տ{42%]]'nf9;~"8*)JnNPGM/^`ҸbU6D Mʢ[ *@֨q_*iH1dH< aӆa0AQ$"59'؋.QDSߵQ$PŨ]_-LYЧB/KYlyp(?+іOt}NNtTL7) Ì,#ЗBsUDvu1wJθ,T'j<ۃ4y{e`Uc*OBNU]]y(~LUU $bW[*7lc89r%hrk͞u| #+)H+j\= S~n PZ֏l={k#+%*Q+sPwW\ڽMkxF!mh[7g}?_J$Yhۄ">f*"CRFNa~#u?y$gS S_! WoQ1V(!g 2 01r}`Pܯ$-,(ѽuM$(VR}3s#LLl2[{U Rn,EC?̸/(O2<° 琓B8;TpM4q+1Vx:1!|ѣZ=krV?erq FPH)vr[5=ߟ؞; ݆EYQ  3"5׸;Px}NKa4FVq(3$+'x:=&[cK'NQ?e}5<6`]tE)PTwF) ۸*CH !dﺌ K:ޚ^^݅lzl,NqMz?-X>Y iƃlBMʐ{M*+h"]_V]'*7Yգk%JnOTkj/-7 P7Rv0a3k~} ,8`bU#B_8P̯үW3½˽3б*jqlajKVp~1p}ds  wdS7J@hrPt/aٙ[Ɍk#=˕^͔>%Ǎ Yhcѹ|YzGWjwd|2_v%Lg/\ . 
_P iCͮƯ=PhGDZrp|֘fU7P}=x\nt〓ظmվ]Z{!gM $i=|$eZ?ܢd:\+7dK~'AJlrsE\.-Oh(ot }ov5EL#Wg$zyD1,:FAtF P\ hrP-W ͓40c->52P#]qB<1%_&ig7?AaӽUBZ$<-z<-RGP[5|$U&neVñ:R'Ż'4tzzՌ |o5`.|ɞ2-HiMyPn4ع6xT#E1:LJ>KZ>'h-)jxX { CG5,ȧ!`dwE/mXrxYMQ`9Ѷ:ɋ+T0bњ*,ҩa]suH +~ݕAYKx<ˮ)ېM8xr^|wԀYsATמ?DdHhZ쫎>LvZ`O9tt{ς=(E -PۈՌA^,ʈr%S_#':Yxt8n0JCF.!䙯c5!]?\t)qy2 հtTo#lCѠrIlw}buq ptYl=0NiCKJI8/ˏ)RM}c Ȼlw = 6:ŝ%D hs맂|wJ=gxi ԆS hdP6;w?6AkvײuL-c5S(n ǘiNIlJaz?ocZNS7űl~_a/W"u>9B1yضy+=b 0 !`h=Z4ߢ$BR983Ig!x_&H0 V6!LJCxeƎY_8_Ix!vF#|e7/[q#%̫eMa·n|t҅ȼDk =?ܧ7փLF|Yf@l1jz 7hI?o17V e"͂o^s5,ڸtnچ'S7c4 ”J67챮v-sQ&+٥dRO1 ]~Q%P.j]fXAw}WӠd ,&:e-b_e%OT7n9 OR[4:Z Z~@DDT6-0$2WjS9 &D~"JXթ98\*"[Lo"2's j /Zqt{[qi6d;`wCC)tha2eLڛF$^GAYRȌg/kx{.mb2LbN(LvNMD˭ǝgWKqW ͯ~뼵&w* )N?N^<Փid@f$0U{R c[m"Z Db1nGGyڥۯAc`.Y TǞ 8v1#K=A@ d~5an#DSO(&8ؽSl8O!:ywyQgt ^n&;;O7"uQ ^\i<ݫTcj? >R 2Qŧ_}Uƃ*8:U W%O?Dde6 Xqv z!ZgQDK}1 >?ίF=GAi`KX|;ѾdT;u^k&W?BψܱFlVuB|8qP4P0yp5IOTAPE3}X/[챰R+/W+Sg_|R2lc9 RrCб$ZJO{ "s,4h3<<ӆ3}+tmD[{r_?,ALH%\>^crY-}:RLI|iǵt5(iaC+ߍr/ifxʆn=,h5T>IR>a0O5ᕻ_]ތ TA3MxKk^YXI)P ^ u/ ˽ ^A~oܻtvY[Oa7s.*9 `F*k^~E7חSSgP>ۡԌ:q{pƿ*8zAyRPa DXG0t!Fj1בV wpX_t;R] ѯB^[a"ⲇqhR*ȡ͛EB :9ujhr(!L9kqNrqsp-hx+w oGqRv?IiK}X_;M_\߶(q9m+aTa^VlYyyW!g$OԌjm@ wIMajzى_ۂxjE[AE`HNfԒ!)ZUqtdIӍk#V^.ZdILx /AEw.cVpG?Ed챹]؂}9FKlU+nn}1-a8v7pX'(qy?gŒgBVh~*x%Nlgc'ʤVBbI|PFrՂ%H/*$aM8wA8 e;ɊL ܎)[1᧖!h:if|{f\C D_`1"{Ab )99$DSG"mG}x$0cmM`~Lq(0qtc pIbhN`Ta*.=Sƌ֥l9=Qܧwdc{ォ]CfC/|hOݧaؠQr?G E[ZA\;|E#C$ J}ԇLz&r"5 41r K/zOV2"HWm'W.YTx-"!~=I7J9L/4}q4{'3Z*ˬMc9uQOжCDDpy$7w ʬCWVd] þҠL64Q,J [ 9^9j0hő׋/.>11IBvn0if N̛G:>>'R8oa+Ru?z u&aV9̆aACb`KIpzޙyx[uL[OXiC!򍫲2cޘ'Гful9+ܢXc䪣^pS4 s~a`4U 4Bk;5s!I{l3Q+Ĩ&1LO1^BA:aB4giB۩ߖx]M`]0Nz"||v{ ^ ]Va:JDR]o٭e8z$Ʉ~= [vt[modZ߰n{#.A uv  "_B@)l`a:ǿ/ 6B nK1 uX fmןl|B?4ݿZi`W.!I'`,!:' &Qk>IUM LW̿C 9w;GjuQt?PV꼹AS?谈(=`)cB9|)~so$e:qX}'qcӁ dBLOe^mp[-% +m p  pD{,`uo2V?4TmbM}'y:ݭ6@KH$PsD(h:[-<.[FMk?p4snq>֠wMD rZ8ò+1IuԘqo/-@dYT&]}Na4-.R{Q%l7V:D8T#(Gz/VF1m" N^ʆ%\TGgHmSd x0jy$-vAYKpm5K%6N]i&&WDq+ˣ(>Q jEoɻ7B=GA$DoMͰh<z sT)9THNIfx Bs0`탳hU|8:#=0AmlxPLr'?[Ee9F ŝrnW҉J*+gA'qðּ`I~_>?ڝ'K6ǻ N0Z:+sN; 
]P;oF[E6Jj! ѷ"RtI1!<ay<]Q^-U*҈_l&Ѭz"_ wZ\pD ٧Cx# 8JWi Ibm46dhS6| T)S9J5K0WlJ5 c@^~Nk/Bq=H5deG4>w%[MG*ΌJps|8m]z0vu#ǓMU9f %w}}QyKϲ{(FvZw;fA`%*f{C\zȅ ^i3q CJF'bm<{%F[GU0vZ1]N#E^i飓LΚ:DVWhRgch9U !xafny4ڛVbԡ\ң<H垾`@n>Ydl˪CK> PJ moACSmSG yXsv$4~k\PQįB+ocM^žFO/6J4Ap|߮d%D$Rf-<52е [bhxd * n6 QEK`R$bQB9ַ7GmHBy`ḠgG^5<1'٫+Oϐg\k(\J쉦@Ca(|&#y&Η4`fE.ɕɪض|9b1庖"eϛ7ny?6^Os:l')l˩ju`_0K'÷K$RΎ>|A@Lnu(:^/䌚%ݎ#I,w\xL-Uap ,Fy(Fpłڭd7HGJnRKS^ԜEmE`Wz3T:6?ݻNyL~N%-t}g=E9ӳ~Q/ UFyLc0.4;S%@\;$Q(M}1ЮS+t?* \l7wb w}Z*I,>c|x^vqke|[݋>dZiy[ @\#Ud~>ԎҳY ХР@oeU4,0K>&d&'O}st2ڋE 1#SUvd彙G/Js"L<]-58Ni#^1$s]N+rU;YknDծ ?kxQzM$.ƆZ֟Zq||.5=ѺQ9賶=?>KD<>hGk[Vf;qRvSv6|D{_3-  $eNӃ[-sk[;.7gkO|`?tj({ |k}$gl!SES*NQX;o^6?p= /W*.6 iI75()`M?h?J.kIC"̄(FO{.4L+c>2_ٳݢ< q]FCÄz#YWFYzK9(O?cV;t}J`|J"bͻ>K è45JQ\j^!5A~6HQ`hiؚ6O@WTQ@d;- 9D'Gp+;K BCv:Nf),-" CںFN (Zag|RRnm=)D!)L6a#ɦ{i1䨧 %=pRh\eX ^(۲\HPooAwzp }f6{%5aW/A8ۘ-?V>P1b-(Dy7@Ę=jtSqz #g;4_ l_6g-lt9y.al!AgB9@rc‹dSvҭ `_>o1ɬ E$g ]^ uKL8 &VV2E9^j]xfPwu;uO(?Zc΋?S704p@UF8*'slY[2lxRaO,YwV'#Zi]LͭdȲMI9JZ ̀fŌ7xav/7bv XMDZ5lhv.iHˡh32;/WfTwY EnQ^ `]@-{.Tc5*R~\VMoukȘreL^g% n.#d pֆ?kQk,/дK׭vw"iodS{mKa޴zDtӧۍ6Ekj<*ۤg$;Kj_$ElVOhedSB{R_5,l2>)~S䪃񻪵}`E հ@* {ad4vǗ. PJڼ8|(1(*|NЄ)@J1"'Tpr  Lok/ߓ[I^I:crqow]33Q{A$YihhZ80J9F*:ٺ_*Y>u&q~-&9?-mpg_X=iۍY֛O=Ws`âFQvͲNcɗwzb*rQc{z]}-gerveRP xڹ Rb!@U;,ݹs s) ys_ (GN'T7ğ-jnMɳz/ <Y;/ۚ7mNכuFqxoEX3rH6t_ `wF0B\*/\P/`` 9 ߣ0s;^=Bv]%odG ?Ρ?qV vv@3?ޠ߲'Mj;>ݵGG1;U[[=T'n/zz.g{UkOWu#F߶k9w6+SٻnCMj"tXgB&ɛ<|T?*aC%^YZ?.)ɏ9ͦz|D;2f)oW19 vKćYF /h(+P4_t À[Taù^3%&\'A&G9G&ab|y'A8WƭUl5XEbw з7M #>] 5A](] @`]]TeQZ`6oǠRD?fJaiAo?ńf;4D3Y2;\QΜOm(r{ *f4ԇQWkO0_l~؀=|OsOKJб*-dMd]L34X^ܹzdHmٙ֙N&TȺ>{4bVc܌}Lj+r^e&=Vsܥz_w5jxD+"z]/Y98|ű\ {%~Vz&Fp`x-yu@r.hk Mmz)b6ߺ&|z8\/. 0Z2ȧbpi"X6ƂK whSg\ώ n6U"';h)1 ޹3hzBsQh2d1ڙPkA] s)?5?vəmoUf] $BaJ̪8S. M\崋WDw!-ib_y KwGtN wp*^|BgCd?"eUYDz4t&UJeSt%ql)g5GZBG"ՄK`Jl[j hfz~ YwX O M{FlBv7s.4Џ-zUn]Qn@QdTr{";ͮb;gt[ў/v5{;[ JT⌱%Me '[LR6_ZtMF\ή<ϭ9Gh􈦤u45?>˜[ǥ@=tP B .b^ۗA5W~s?MszkI~-@]k¤KΫr5k9#_.$OF窂#[Ju@ςtX`a,L[&ozȏ:uIc=M! 
A 6~VPg;$<]R"#Y1KZT|  m5!HWD)ꟆK)ت.#}fc>Wy9$pRӠC BT99*U~%oy M(C֥M(txq]{}2<)&AC+ޮv:Cֿ?NTP@!Piՙ5ÔUpgo$(?*kU 5N=-,Λ)ijh=*ga̤ ^}8JҒvkYO Դh&_,0ܯⶠgMRPdBU~b0"I "щ $*([ז}bYlx?q.& [KXϧ -`kUɻMyCɊO.ghvbwmR#5뱳TSpβ[=xL{uz}<#qg>wث`Y 5HtN/ȠNu&J X<#j)}=4څ&<< QԌlk]\'tZ i :1bG?5k[M&v\glql>j6\J,_Tv}<ɚcOc~bG#>Zz7=sGiikӟ[87VO'ĜXMٴ ]w{@Yu5f8^롣m#ϧB 9; R\D;#R</ %_SÊJ59 (ZxKt]ޣ4;{Lr] 0LMvu_Ǒudb0$|FuNĐCiU0)ũhYzRL{or( Q2$ r})|~Y#&* $z*OR:|ŒzV`~x H'`qZ~F?ae|a+aY 8DP_7N =8MJ2x!yg\2bN}:jJG=#M_ŗZŪ6=L9VX-.-Gڱ[MLUq7;څp8*=ݯ+y]}ym.ϢӔ7Ya42OW=J'OծT :mWq[@Q ?.۷/=gao7:gpPw:<SCR?&fL ʡ ?XFqbXGmP˞b .ݶ O^I =(tΘN A 枻g_j-T7{Y P(eGXl,uѸO<=2v. 0l5 hg`{JZȰFOTX;M% #nŘuˌ _vD4[Q0u`t6Vjږ&nrؓip2Q{hqgg Էa u|`cO D T? bGR;99mW!HiR&qzZ|z|Sۺ3g+ i)4ۯ~Ƒ$Rx[K;ۧ-±8= $ >&`XڜRq]녊Q^zUGA@ѾIs=RܦRyal>_+܋\8 'HޛWF?Z25D=w7F1'٨+z!EG/Zx%vi0c8 [6瑱PEg?,_b4s+JlVᣓkV_uչ~X7oSIp8[N9P~kR[δDLf:;bO(e{w\!$W1,}#Ddoh_1]cj1&WL[:XɁ/7~5M*9~ G(/le'+*Z5?rcꮟ[z¸/l75&\<V Һ=Rkv7\]Gڥٷ6nj`|g?X5<=CCw/Ț_#k*{f,x&#AT%|`GiF#7\<Y0L QkaÆ79 ^ ۡ7a9 ,BQYHVB 5z(N\2b"hE`,)l\~!HwX|C0za'+:U_k՟pיt/0*?T#N7~"R.c>=wQRE¯UQdshgT g1vn!Fr=D8g,&؊P!SAȯ N }{n_!5P8_ʸq?"`=xUqQbޑ >0p#:‹D[sݒ[7~ 7j,{zVф0X-r YX/%ѧl)0׻kYde0'-O~M_5LM)ɷɸDaI}$O*W6{m3or} bPuvNB>h VD;hXlz3l-Ά'\)/3%X6$Ts.u^{ 3 _@. !y8k鹡'x?}v̨n`P(xB 8(aOVjxq&vר <նyW<6y? A@FCU @EWycgxL+BazHX!$A;䛭KE` nD)¯ uU:I__*bwJzy-97?tM PH(rL>i=ԧs(MǮdY"ݛHb7U·} Z^W>Ay5٢kh 􄱻s9t-pL{0o-(/ YJvf85Rמ_B.l:4YF&Vi䍐oJ_ 5o5y zIFE0Қ&ti\-י| NLt>-x00WAy9O:wN*Or\{˨`$_ф-K,߲ȻLU'A.(/mTAyz$`au9x@-}u' ~PiR+v^M74nOh<ı0ޱo A/`nA9z!//7W CtϘ''mQ*@'?SF4 QVxv/=~X·xrrG2 j$Ɉߌ}υ+vN/&+m# ۿ6dS;z]ffax<Y>kƣ27Yw&\⌭`9?~IHBv)6y~D3ێO_/9kl)Kg>UQCaelX>:׳d//&y{exi;X77`7|F1bM!*3g)o0Pѡ,:c7j*ЎÐ?(}>R +m"52~ 0=ztf$$۞gPsP-+)U^e`t(C0TP` /zH H9DP"#hz'>v|,̄.Kh;Ő/ny<˥! 
.ՋEJY4Ȝ3⭚m)$PD% YPm~"7ƍJrG Vӯ֌үC?͉|ʇg+ 1Rν\֟[ؽ Ƕ R~V;W3}*1g\+KZft9J;$mӜFjiPD3e^Dp]}H- 8F%0QOiJ6@]_zduPF{J@<($(/?˜wO|=ђڀxjTWր)XH&r?5xS\YL-;̷@zYWNJQ~`+q6!7ٱ'Yn`?hґ+ <wк"xO=?>ۯ!Y(CT5p9#), ?tInؗ t}H2O"D3naB rg/\<h(g@=)u_|(Ar ;xC;w3> =ڗ[EgΧuVt˗q&?z.}m3M"bmp:JkS'vqk^bXw9|G}8\:+7jJ}w4i7kU_;}/s%yPoOe#2{Yro A"0יY(5um0AO vH=ZP\.T6ol.'۴Bg?j (/7T4 ۈ"n" F/7xLvt FkF)C˷wfC}uqZZ2 {^ã' sG(/i]1T5}[s.K@qST/FNުW&Vn)r>EAp?dOfe· о7-qph*L7U\(e?KB( ӽx_ogn([C= 0 NwPp`y4d2Uxlýp~H- ,gR=>6A8{e+yNZ+A/@<U` @rb `fłG6zӁs U1FHg53ĺ>_$[ XY{8W֦W_NwWǂP.ź,ުnQ2 hvQwdfc27pk=mYLĖTT ?N~-[P̳z J|Y7}h*8<'@.>XfFh(nh&'1=rS4b.fo1nn,5\X}&>ҷHԸp85aO-X}G*PJVjNSNX@BDhNaO+X:։ ]%~ѪѫWyxߞ][׉aW(k߯S*SV~}"R4s``ApIƁMǮN -ۓe3BBl>ΐO2d>XӰ}vE֧ez%l)M~C7{8{罹NvJo04gPez&tٕ4„aiyN/^> [%3eKrrv-ߘ78l.@áiWS"ٟWW2S ˥HE[sKSeG>l)o DVî`o*T`̭xܾ* ݹ79\"=f4z5-֩5,S;;.Tyãf<'ElEMАTZVMt(V%?w٭ݨյsY(sDf&k%}渹 ZT^ Zݧ%0^Z*8=6OmxU?>FUf>*ߒt=yksY=D} ~E3KKKL ڶ}g[hx]n1]7|9d)Su '[BבMT"u`fc૓W;J= JV4~˟xq" Ϲ/~ywBoB/I5mN?EQZBDʿf;Iv87ȁB FroCL8ͯ(7Yd7/4\z[[Y2̝}+º-g+sY)z,qFRM ߎ,Z_`yWowˌhYg Z \/qOQ~*n-b3& J]nYl,(~Xis|?+𔩌`(~CϋxO ύnlFW j+qٔPz=.1a*Feɚ!%s ^9F%P巵ԋhHKW6:HyLnK7cKhx|‡~ڽ#~آRf,XUs :-i*2Wf|=Zul bgt i腠i[ʗq[Df5V_Gi^͇c ȓu$7UnX9.зwQZȷ&G43ƨ2(Ry6lp#W[k[m wjeSMGBQB),Lć_+' a2Lѯf9]F+{5To!)-i]_c/_ZGgD&ډsaCd'O>ӧUU1q)̵!ce2F]"㷡e [:s,Evwf=;{3/rt@`UͥRy,z_Y_~]vI~z}=O^ԽLF*@2 ;I/;ǣcǩ3۹A-HB'v!mcE+UܢΏݍ<,ٻ/'&K=͐gYfS\_2i*u,&| (X>> 2}mL'۝|yj^6AVsb%CaVl]2X*jP6YU V9e7\hi7Po:?EO^Y\ #`Gzbg&jD6OET<˥4jB:RoJNRnK#@k:/{!YQGoQLwACm{fK g&/P{#$Ys2Tn˾"{-5eʟ΍X$۹tj\A<ksºuk% ehϙjo+*^⎺ޢinHh/:me| yE\}ep|<\/g2`5'`- :LVy ~¡ W0/cZ_C-,EТ 6mLYsإy~4t΄n+L{ZV6zCC(l>̮*4MX5lӨoη* bXN:P󰋹D>tS_SD_6' p.}mlF,enZnq,4~VG(U&{Jgy5!(&u}O(f--qބɳ/<I֍K˳7C3 X/e2IOh֋ꍨCTZf}2Ѯ4_+꫌ʃ?l214gYeFxׇ2knYAm*cXa@ne^8^`X~hbXj-ᬣA7ɾZ{\,V}>6ZXUe tS(YX%xN׆_:B|NT]oئfªQSSA\|uTXUtGÜ, fx/RzO뱆7"[cWX7{/yyn[ܣYȟRAwԱRH Nrlz?ɳ6/dC[ !d|x 1o66V_%H󔫒^Xѝ֯Q1N2eSjGi4ujͭqo,><92.փ-tSYL3ʼe]LW9tvY-䦺ρxkyYY-l1#;_ 6Pm+aѨ4o^3ͣ4ZЬ#oTϥٔϨ,e_&x3~s*e`W;fz:ugYh2ʝEDQs\#Auvר L,2RP !q8_۲ҚLu6W6m߹VN<$DwAk\cMiش_Yybj4 X=ssXjfzX-Q\.Gw%~2;LW},J)T + yW,TwΑ'·w>#`=](óPG-xJ27[ 
LvCgJ%m$Hy8k$O`l )(nsԞיKfgPkZ/)MD86QWkTO>+H; mAG[}̸s}y&ԽhS<ԇf۱ymݗ㢯Ekʣ:a]kOc} !oTSzZ6kӓcq|*`'*M9c-s65,sg/U| k;KlfV͏)xwU^jU{(ѳ4ϼao)vSMa l[V6ZҖQSN|[f`dbfIRڅBô[7&\PMtrULsNk3C+:C,)-ei߼S&{f^E5wqw:`L*ցMp'[ޖ΅t;bo:x}Ϟ7#v.G~֍JgE"XW~wjvKDv)n-jTQu3I5) p|7O 8חsuv[ g*"pE v a8:/_URi_ n޽[Xs7r.sSc_KQFo.Fy?a9ZVbU>VJ(iAu^\3'ftuCRoNU6ey`Egߚkk%}VZO>7r]j϶on.fJC`*VuG+o&bwL{Hpl}IiJㅱXzJUuOvQ1|q'|Y]oνݟ.F֒c\nLO[Uܲ$1yDB[ʬ *о &^Vl8!T\Dz$@LDדsSon`)AB2o+=!ry>*Ar6{CO5ybN AfejjZ ;1e9)9AٙYVNaRNM0RͺYRR B4Q69f} @],W%Cp(O_> UPf׻x.!F?.O4JBppA뽡z^L/M\ %fk$2zklBYv/Z|TeiUuh4WxBK},n-ÐbbСMz+[*117wL5gilsn=n*CGL_γ&PC]QyN# YgVVHľIOtlE%cUu2MnVΛhXWMV@2W4z9uɳukNM[8Õ1-}Zy4| ĘSlTRmk2|aa79}ӱp3}^(o\5ts$;<ڋ)Ok/Y8_bYM}7s`x:{N7@[<ڗy/΄ˋi+~ٿzQM!~-H%)>VSvB-HS8IkOGN\n)QH"sSRh^X$; Gr._2t2K}FS2""UQ XumrC-ku*tN|_{="mv 2NK|^}2ū$sYQ;Β5\OɧYRUMO)4!Yf{1AVY=[6LppGﶽ«0+TV&AVRW+Sy8ݫI=2xg6Z74|APYM^ka#=:BwZgl}([o,I>i9-'O9=!LO?CV.CP5mD`j}Z ת5dTľGK!V 4 .h]Rk|a͓ްC&Jq}Un~n)mv>UD=_(_}f̙vveQWwsdrX{xոN;YӸoDq, _v_tin:܉VFQG1(㛮#peFpOh}|j߲JF1IMCbL;Ck %qh;C|&6Ͻ^¼EtQW#ڕ,i+'gewP*3Si3țww \a/sg3Uڞe:yӷ)YNtt ŸJ1fF/T;ӚTMbi9$Rs%UM6)h/aI!Ro|ǃ44P,i-i[曳/Yv~#y$l]ÎFJ;,0dJe@&m)n!Y]3V*2R |gA'c,_? P.fpX@],3FF@$+Җbfg j9\݃zs?R&T՛.A3]TIl(٦z*ew昕;|Q##Z'mO]JȲa0j 3ћ 5 &=n }*X,ïVEWkG̭ȓހC4ϥ8|?y%⛀5ڮMw#)% +n\2ZVm r'I%V@fBsW 4m0#'#dBz&^kn*׮AK] e*Ͻ)߫P\s0(ƷxKgT9{֌{ Qߩ<9Q^hT0iW^OiI0sJ.!yp%a[S۫P6K] KL}r&Ȫe+ȋQBXl=ZFiT[~2L:9=!҃t(ː2zj tGy4\23{9v^7+.W{GفaK~=>bVnh_e;Gtk=Q.6l[ۭm4Μ*xIA(\4Z{oF"' ʱ,8%%!Z.xz_H7|@KE $%Vsw}/)tqs!j\;|p 1ג^F!X AȖѿ"tTL%]E^$q]Q-R8$:[ewOFԦ7y˒Cc,mz؎dx6P(G0ߒ]|Nl>e̖xP7&t&#j,߁ zp.BX< XDIO*_뭨W^3pŅ\.ޤ4Aņ'!cZEeM}Sf3yLJސ1/NyRdGOl1 ?ZTp沥xhJibچ6XkڋLHY{ADo:)d!I&Bbh%btĕlQ_OBf^RI dPd?;K ^=eXZzKN22fB{nygR'~Z 7_2XkN?ˌh[sڙo'w"wӈRl~? \euD]2qㇾW S}L],K)>?z /UMBr@yPZ!=m}xJ1^$R>a}WeBhY]=Uqdj?x7><؛}njZA߲fQcuJR8-Ԩ{]8ʍcLfjs]{ uӗKzV 5vK9V2oO6m]#12jCf0a>>aw+CQ*}ƿkh[ZYwy)@Rl*eMwj,yN/ku.M@Rv]]-\T%2Wr:+D^oFVUI[\z aftvU!g&Sӿ-Vd>x*m`\&l{wW_6yx탴=s6eEB!p|Cl81RwYDV#Lƶ=HYA *N 7 >73N.;*$!c Ωu$>*_Na6?9. 
59 fT gW4ȺYt2Uwme73dfz_P SoΎއqI`) !p t]UI19ej}s %RUwBf?s›VfQ/VO<4c>0;YS;j~*YcqD4-E^ ӢřMZ'!=SQ+2`1}j3eŻCz9.=;c4*yb|wޝixf.ط<S^ |K].yNjn Io[m@7oi=AFz+,|'O6o푛=?kCj1oU>iۧ&tE;r24-pw;I=x*D=_VwSj `֣VGgBQLnrr@ d'j?[`ցцAD x^ҰE޴vpqmSkWnMurY9tbvV^-my9ei[7dWtMF3uT!:8lWcM~L\]nDŽccspS}AdMlK{m7t#c/KImh=SkUW<\kޟjN5R8Y}:vC(j%OrvثQRq"\+Bލ5&/X̢ȉr6fJ#/`ӝjfq,ߞzճFHW,i`,h(Xdˎj31i4.%ᯠ%ݿA4e`8ξI+ 'w-DXx}SDŽ W2bײO`F bEj븆 7;0~g&6MC~(_(pFjV9](^㧍T)Ǡܢ1RmOiV8Sefzrպr9oH~ &Zo2-mONWS̕A:́RB6zϲBy]_+=:(D 3 e,5@- K 9h^3He0?t;%嚿O!ʣ@ƪΚr\I9 Gbwzx*!k vsa 0v{) wnM^m5TNz^]A(0VZ_)Z%TJ֩W^WelBzWg_5e?3_*ٗѩꙍϵsrL m% =BPQ:/l \<Տ#O~b+DKxN J$yqg%[٬n^}Chd^Շ]%AW ܺ(a..2:KHTqku!>n}-ނfz7n.y[PNMC)ߙ+UItsTJdE4qQt黇vMpT#.\Kd]ǵ;[Qo%l㜹 5F /FT '/W+_;'cb0L/Id)dJߠ  !fsuަ jޜwQ')'`qK6QIi7Yd 4u^A]TIT0xl So\3SO'GC6ڮV8e/uߤ~ߦtt(\,Βn߱(BiXH&y']Z[̞:ŃWcUi;a*e-gAL.~ :p }+ NËkaPI~N'ݹ`<7T95a3[h*IjS!;)UY[m\wnT1Vie*olwHrk2юr7BZϣ%:uwzM6,P}9Fc]1=n)\+;?/6Ȩبk[&k?:j3iCu;z\#T3=Mk7 r${o^ԱH%aJ'oƻ.*eScܳg<)C(Va/u,դ!b05.ljF TOrp /AYѕlמ+( Fc ̯1f8:3 4oyL(c'[g5k'F=.֙H/нs6{=ӹvXXǙA`oj-Lވh5{5\n.zo\~wg14 WyI8*RJ%yr7ɍ#/2LNL|;׷lsXJYKc;znvG:7_d/sW^=Uv4WM%X϶>]-V~;kpnQa5Ewb_=jB>#aeL{+.tx4k *g2|y2=#N ˏ:iShkMޙ7O?s_)(pΰ1=y90 tl/Ω[eGy=̛,]ZT5'_Q,BO6N۟5I$x)wi ":MdP8Rk!S5ۢ˩(xb¥?"vj<dH l\2?Azt'z6ZQILZf @Kg}+b;s[;)\k9!IѳN,̌l(ƈ Ыe-ǓQP|'fɢǞu0V;H`!<|rԢDP3U ºYnz;M .ӊ%rܦEgːRMԬ{ql;uzPOa6vtb:A+!$&h߃*}qsлi~ eL*EI3Sw7{C{g뵒{0w*piNgf wY{)GY'\0Rׂ*{ѮމӮc&Q0O?^ NsIK]:-QӄDQ}ӭߏ:͏C|l4UϛO7SI^r=U R 2wZ,pٕ+_%X݄GlP &t"WCѝj);ShJO}煭eE=_LnZ|aXokAk`p攫RBB#>F25LUn/z%Xpkg[us<|<|&|UU)lhau/Ԣy1rϭJdij_;zKIY3vj]Un\RwM_KWc%jx}в =Gaظ/lwFin0) N3wMu M̦˧`IQ,nyJ WwiWk=:XU\\Ah^{4e9SSˉgZƿ>Ne_cAN=>#Ӆ"4GM3PmF@/S[{I|i:yeyDRkjDqVp{8u$Af2YY+Od{pdW<~#LƊ D>`7[C "knpMgXt6r>-g#XSׇ5JK=yÂK>`L[ 7\.U[0k?򅐉hx)wn19bsdu o [&i7Bv:n+})5wKwt#/X[OUpr%{VwGX̣V-:LjK/秗ЙylE]A:2g nZQ;a:dHm`6Z#_:u^m8s[⥙%I_2SGJs7S2KT@Ŋ_+[.e+g|*F R($>`TuJ_D])ynTFa(Wߚ2 ^'Z w&IW*'omh!9djpc $x{Cݵg>peX\%sn~T[˘dR@ۙĜ݁Ԣ$r9L^Ish˵:Ղp /ԩN2Ģ9Aԭz?-+plLhtڋvZ+zXѕ(UfvОiN\`ϙoV9t[{d06$q)픨 lnXl: B=Np<5?g|_7z7 
NӟmhEy{b+=6D߭?@bޚŃ`FQmF>2LrjQl/ڥFn~>'ojRѥlҡ+}ULtKM5a|@FaFBqx@`ED2i!sP?M<@um޵%M"`S9I Y;Rr=K;X|ƧԨq*~6bM {uh{zvE=nڸsdph(V"UWD_5„<[IcUu@w52 xj43wXjj=ԗ&VذfY5-`$BeR4Q9L%R>y՗XvS(\TtwYHuprRtF;ξ@]rdT^0[2W}WEqL0`Coqa,KW ƴ  Ps8mm5,{*0]MƮmdC^R&:лST2vzg ھM>d,zovO6~乓5j0\2%aܭ iƲ}հO:sgRj_<o3m~}IJ7L Q\FfOlYOhҲBu=xF9;R7*X#S2c-_{fM뉈L_Zj@skf)GEh=x^j*2{>Zpc;>ikZU͠5V{Xָھւ頉Rϳ|Sp 86Ʊ)6xo/ 6@ I0|:A߫Nl4Wy;EO}UO2Yăl>,81GUbA`tcX=_?h=|&TnH_Ob9Fix/!ÍE|<8 :x=o~4x?[  *B}dcU MZГgVU޽/h`HTN&Hg=ӻ_PN/$DiDsڰVj!fǟ#UBr2G~[8`0BL-D*'t(dLRqOMQfs[DT.ח[JD tFqG) J {d ?zoL0]"һ#/TWq~%h=+e_%L&d&6ZrmF3Ik+l &k>f -.S-8F8'H 6a=`ȡ&EyB67"d(!+,0cﵤa,sfX2J.z&+LZ=Cq>TNsq]<%LC7IO}5oUO(av91T϶AJ1&GՋ(orr6- y;ES/aO_ >jW62d $94=( !5XHne m]3&[P Ȝ00XhCKK*<2"m ,tbFjIer߲ND+eKM~p,Kϲ-WRA|Q{Jv,JAdl iޥBԳmIDvPC6-V̪T>Z`x0 ;OS/6mdGMYP&4hdjS:p"|0XL[SgY ߰wII7Z]`a8GHS̴Wwj჆2 䁰H/ʷc\vdgPM5슷IP PQ:P=VQyϙ<]ƯɞS]-B\+Ԁ{̷Bfb~ 52&ug=qe7DsQj^RF40̸{uQoeh\0QO 9Hc#:peSnզݴm ET+*alq,GP ˔L6Î WVPJv̮֫ +ݤ$"1hSzNݕ'рDN6L\NISr G[:NlSM2X,i 1< 5ļ0yF貣uٔ:jEX;F& Q3Cmm qpF8NS7QzCkZ[-jYdTZ5Td+MAg)?_Ur~1@ w$/p '>n(@dR H#}BB jMP6o7$ E20{ߤb*J:}W74m<npU6$挕8j9rQ7ޯ*kz#*{1lZ+)ܖr\?+}PY]e9e/MfhYqboմP̙8*'xDUoT`|M}D_S@U1֜?_N`|ꈹ͙Z~8]eb hTJA1ҚT\UĦ0;gAfe,lJ*77S:2s^7el,܌nA=i!( %fG<SaKZs+6yKOX5T!ˤ,3~f.EˁHN;`f;+m9UUc;o)U+[L4)@4Hd0cTxV/IB;CN ToYU ?ٍə8V EI$#@0ǞNaskVQ,+-Z1"Nqy~,E(2+s#Z7HVI+)8G"7 Sus2MФ/h+v5Xspn"[("~UTbbtfTs l?OKT*Ƣ:E?j?GCq ?7#u֋bZXϯ!U}u)ɩv wA%rswe|GfbyM읒f j1%1V{8l~LߣBQ!N^ d?)G/ U,)ad(0"4_N[Z9_h3U)>./yv==2[dعxJEkL7+~O1/^mv8Io7Qw0xHm/2A˼Oiٺ+La^”2I9M;Y,SAcP?ԃ%:nƨ.ȒY6ČPvCkf \7gnx.qw `x@. 3`87ٜ~w禭&;³pSI)z*!0a&dVW˪|׫-flzx6C{? 
}l.2;P.׌; T $` oTG"~&lMd˩6YG”7?+b V1Z|!MU c6W]"/G+ʅQh˼F}T #yVg= $/kc}U5 dDWtY/6GeDu&B@ ,8s%HOx8Do@q{+韐tʚȑ'BV;ub-Q6VmYl_ b}xa>ě ܘ+fŵP5siPdWj祻m#aumύE~>z,ר؟gu/YҊ4YE7yC *~橧-d(h-fIwY2t[팏F(Ewy<|xcn>_S{#!!=EU\eV)E 06c_Ii|J}m{&?BT(3#%,s:Ubh聴c-nAa*Ou%94!ӛZiZ{!NLt/fZ(QG3C|eOB:UOR휮ICX R(s޸@Ѷ:I B`K:կ{i5-5oeroЄ@:5Sn7e[%Kg5 h@FZNKVlqislmfJjUT`|rl:s/ֿxH;m 3-v}U L|*zp0UqhW,$5sʾYLLZba.W %nyT;$eՑ"3<-7@e=HF8soOEiU%uo4Gkh D G:l>]Jr ljW'gqеӸT4L5Բe˕li\SF4)Hթ,ӳiگ͍4;l|NՍrb,C3 pN3RoO XeΒZ0 sBNx,'@05ڬe礢8@/C0fz!&V=~.*}߷\< g], c1nHVשeQIn)(QF#QM#гfC1"uR̛`Z!odgeS^έgO ~7arV<!=_ yYZoۺ?N 35*yKm}u+SҺX .c 85[٥.7uV_OsRw?E2ձi~C +gJA{J R컥g5\EZ&jVofU;l[2G&f a;`[ T9h / 3f}ǟՑ)QZa!; h&y܉A5p?ɖi<,槗*k0*#¿ ʽg&.x|O_{SH\L)*#r)O d-J*oME[cSXT뒩PmjD=F 4zmEeϲi ,(Ϝ ]v*x}Uzҙz ?N_ȕ龧eeIAc& a2B=*,ͯ52t%L`.ǡZj%(\rA|9! bBSP2lĭxP>Gc7Z!pc96}^}[^݉2W/R`'~Qw#cu+ )}}SR9J7ȕi#IuVYOb5\jw=q9 o61Ƨ8(GUǨսD oR[#L۶ert3)PaILv)owghY_(4'u}{bNyg>&6ӻ6 #!z9mQWoX[UD_ Q"l/tneu:JS~NݩF !S:!Td>+̌m'djK>c `lOqHZaWoQ 0pqwEzǦ?&z1\šE< mf̛J(xGg63 Hwd!̰HѦsQVF.ݟ,~={7qY>UTݼk6y*EK$ •յ, bۥh`Zeg  E8 iLIvOZO7UߙjTT}c D ֭|eͧu32\c w^ڧ @d0Zw]Y*gh>{F`h$m/,ϯ9 FJ*^OV2捫t)Y`U<6JL851iV*'ex* 0)^mu0SGg ˇFP8fjVRxk6Dg뙥 .ͼeaj`S Q|LLU0}*l5kSZOQh|8UUhWW&񯜁:[{UpȈB:ߏ{9^f1?~MqYTOhV>Jȶ#",>eq.TOm!&>Z*|” 4 Skoa_[Ê^6z_O=\pWɞX{DeΙv̵P`;3,RFSB66P-ћᝣΉԞ|e{!} ]f hN@! ӭ:}iiɫ%aLʮ)8k/ x&]fЕ`>h`UL (i a?KjhN]ev\P:Mẖ[kP6WaVp{|q8b kseɈ `)Z`!$+rwTH+v`P"KgB*? |l+}X/6mR !Uz1<ęik/BheurԹ`e5BI+c!0e߃߭+? 
2XΓ7& ׷a6A^ fcEJ@AHȢKdl@a`[KFfёaHp heeQ+dȝtF>^%œj!ʸ)RM$O+x*D%С.5 FHYoq[ x`#Tb`k5 =hIP(FW2N,:Jl E?A|v+-sQc`K=;.h[õ{JD{2![Npޭ ϠH ye_U:\Yfq(XMxTp+z ZG,CbN0zsg]<Ϟ'+26~wPΎt TydR:E63aUO̕k[5`[7Fv@cH`خxEtY^&J{ jgD/yy0[5(RAA>B([;C_1/9b2UbQ@m#*=D24Q!s tZ(<2-L3C* |k\É-eSFʲ*qZ5^31] `>m~G?AS8HlrW,Eΐg=-Nj:7ʼD!A\iPi5woGb~Y ԟuTy ZaTIyVyꕁMrcw2eLvCо̑SL9~ (3:@ޕ(OM xW͖1WWswS;lS|5*`g<qӝ^RS՞3ka7:ʬ惫L]a%fF0H Pf@V`9SLO-qo+lIJ>RrSIC8cy uo8T΅w7YS>,%;I GZzS,E Ȳ1~;l]("3MP TSqa8Y_AdqƶCd3 >Ȑ,eUbûOO}DOn+-nr vFmۆ-=N$ia_2eĴd'p`d\xz|vmvt3OCS$%WaO'Q3ڂY'p3޼bx i89<eP8j?( E _a]Yc6lﱆUA҂fBXUh3ӧ-$ 0~9d;k2ܜ¥mr^LSue߯?6^OtI+&Iqt|hio)ɒ+ kQj"Lse_sqvQQeH1F@U3 mY髏)H[C!zEڃ‚ޚ [Q,39VV%;9Ӯ)3ìIeI5%xM ɩ.=L,BdN|&U?TbmD-15[,b0*zjhJ=AGLgup,FcWM҂V_q3{aֻ,}z`(Xb]6^W| ю71?K*7+R )i>."ܻFǤr_YgnɿYW]lj|OHG:Ui 2Bah\ރ\֫#SxWDS3^Qd{C2Z~yb婲:kS|KM*CL.`p|F3%tgӓ~~'8̔A P y UwL?3-{LJ(͗j^uu 6.5GƊՅNg1ќ5hA}J5۶{7M>މDŨ#~皓q@<=,>e lHq}Tژdi%Nԍ=٨h]`Vyٺ5HBmV/r8Ws-W%BX`4r+Su˓2t.7-6<ٵ뎬& ULw~eMnT$ ;sK :5>Hl=l?Bh̆ʑݞM <(XkdTlV%uFVEӢΎ5W0P*Q`su (8šyF29r 0^}ʪ[ybB2+(aPjmi'dX9V|+Y_-9b2;+ y “#_=L\bɺf8Kul3 xL !ٳ<,VWMhH<8DWk󘲲}2CɌ4!5NY 5u3NTU'*S>s,eIɞlbRK.sPS݆ +֛^lNR^2i|6W~=8wq&N#s$5lB|d7meLO|-^ff e`=q]{l_ε~gYN7V,^n743p }"646|4g9;}msF. gLk@BQyeج@kv"/g_putO晣Й"gL-je=GFAWϹګ QޱGԼq`\ zlBN{٢;E&6z/QH'ij fwT9cTFZllҏK5糖DeeT)n}bb(M'\̪n |! *: ]- MiVP B)E&heU^}#S)Rs 𲷺_m^gЅ!at_{YIq"w|%U^#~ᨇ:Cb_; (|E+!Wu+{me;&>.=C;3u-[ǹN_w @g"Uk zŝk*kJ3k/Y^H{vIfelmɋ3A|(k}td ݵKY! 
rF?&Pkui@ Vh)ݨ}ab֜biLnڣ(E϶ Kz2юfgԩH/nz,hٞgfֿnYB;ID WCUt ?Oy^F  +i]z̈Nm{_nzڮPק^s{TYNx ݝ>Js'/70XaDH`%:屺|'M ,9$`O\= NDQӯV*ӎDdП'M^Bpke^r̦NUT&HW%a말R=\}(R4lMRe#vNizt¸GVn ӆLBeq5kFZ0HW*aUG&65ӣ,rlc*pl77uYP-[43%++|n}\z6 eeWieV8|:tȔmt@rZgLz\fS\Sarn[嬛~CT uO͍q2[>E"wz-EMLT.}Ϟ!?㸨4"yߛ+mg款=EtraΞ!mZŪ5UL-AٰخXסFR>l#mA欝cѐdqwӥ"Y_,ˢJ8R6[Pփ4T(TvLZiNfݙK: j{*`}xe1+*s3n]*@@#' doc' fjY4ZXX% |lfc&k.0@8=^ /M1*6DעQʸEҊbeMlx,Mug xFO~mz6sZAiԼZ(;AѕV,RTic*d'\ Ol:]7uє_d;OnتJX&U{&̉댴J8!AK6n5iU%$cP}}`*Ъ{#12̡=4KgQ8>e7 r돗*t_2(Q\_IOrEw=]:#T1ǞT~uݲБ̳S;$F r lR/xh`i1=u.y晴a}9\Eeo+7^YQ[9FCVHjG(a1C+)c(}4[Les(mI+E2tdeҡ *&< ٽaѻ#,U'':Tx,楴3P>fg-h1(Is d=A&k; GU+.4cvX/zkL-gਖ਼2)pmK!wڙ-<"hAВ{6W:a#*KTspc2H5-{#uٻU\NFZEƂ0e΢ ߊЊpRܱ跣7-L3c)=ᅕ{IF23a,`COEQB_z{CiE"1Cޢl{XT[nlJapJY&[ZJp/'fv&dSjJltm_Ỳv7kl R [/1ML蓑[7mstJuh7xrcNsMz&g&pXQOY]'RX$LwfJ=V|ZV3i ϝL/~_lª*h0;HrbFOH2#?_ֽ [b`~]sqU[\l8N8=޶Z"O [N&tP.p#>N:rZBh2N])wvE(CYXw 23&guUWʭ)O?ڥuSl *~L{ |7$Oi|6!)Siz\ 1YX_};ȼ<9nC=%# ]d`' gVr[~̔Z]J|Y/BAd-WQ-P̍^sYeiEVu3Q!t_k-* ࣶ/UdW QOBƙkf.Y\ϫ8O#fTj0MMI7[..)Y!_C=3̢yS7?-9O;NUudٽW bVvHUY*g3>Yx_y Nی; uT1;iDP1W>BW),ܳE?pX4=hhN [wZV#*.Py >zU%EC9x#0]#HuՏcx,upP (18DV 4f4v`>(&k,/fZ`xs:!gyKvFB1,2!&5E !rklΛ| KUԖDC\LPQ !Bۥ@Pˡ!zd;: )aIb E26S1ۤtKReF:iVvQнct{CI{ӋUO~xi͕ahy&k}M ,#<[o? @{*fz62,ST@]%H7ض2HS`'dc (\a\dQŞVLD@CD`z2G@ f jBÁp/˿|B)F,o#;ZI922FLuO&#τ B;'qmK"b!l+ @H3ۓ4o`;( &[SIBL6g3baMrB:#eGO:/=@vǁGp:ǬшY MYXTFיʕRdd$Nv9XE%Oha1M/6 Y{= ;y#43nqU,j6.6b)#"d;3L>Rw)Ad<0bVe B [+9!Q>D-7Zt"'/Sfc)?S!T@FFwf[$b뾃^oa|uy}uuJ2 ׉p 'ў>6J;cȯ4.<~y띶oLKpCDׇ8Κ#%ʌǺI }^ lwlu+{|lp'YAt (vO( GCJQ{x{-(1L&xuzsY=_~-QK}@6f4`p~[l;r1$ 6}:[?ӛZlaVh+9w*#"gUڍʭG:,ՖlYdtd X"YZ*="ğH@~yu0ymPhbQBG,CAAcj7μ5.1W1lq~!K9MSD+_g>!ZB[ABbo64 #W^ >M_Pag;sSP 0W2|CYո]*Gx)Ҍ&BQzx@颸zC1P+,w8ɅLrA0p_72w]էslLq~RЍܐ/2O$[n۪5koi׷d֓WYK“r\~^$ʢoRoh#qkEL#C /U\޴8I<2Ջ^EdP^(&5$r#W_jz{2%DvJIYQDTTVe1:#$pjt/^{/}_! 
Es">J(OE0s 2!JAhQ_&^Dd; 5XsM=>u6 !BLRxJ{-EʞJ'ڒbPR\ 8FI`Bjψh`ȽA-U2I Ř֩etXpu FV fߙ!q VϵIu @8['lJF%`2=!Ѱ|.d~pD`G^iL%SdIcO~.a q&,nzk]KVF\Q+ LI/l>+$=Mx0eW6vpx< ̏ VDP* yEyg8:O>m:^$;A"[CiVn)ܰWJE.pm#(7NPRK;4ElT]CZ3fX2;Ųִȭ=ti4P֤L#1VY-p})'mt0Z`w@}PjbHl.(c䗰I!%P7Pf2ޣWǰ~]"݅v)4YY,8 j,Vt{~.щYܐ&ЇfJ7vܾ9G;y8-t0P]_߰IRg<]JAUmJ9r\`(,@CfD|4U&ώB*UU%=<6 Zťnpd23"h Zt`[LjIR'D9U} J@;x:z (ML8J#"JВd?+lBBt촁e >EZ Ùr(bM54BQKA#L%tll:7-ZzW#ժcAdZ`akABLiC,K8Lc[V6@5V"-amxm𨚿(   m >FQpsSB߄֫[㹕IwAD-q1K+1[9GnEK 1\W:|Xeb.&T+:T%2ҪW(+dF#>Z2$kd2}eM|-g 5{t+c5Q/ |1C]!G'9l832f'k{ 4e?1cTܚPaޮl9?Ieh@ƢdIk̖7J6+IEm )WvYXf2,`r!\ƚlR,qncVf1B+3 b  @UWS>P o KBelP,O2<^B>BjpNW,g14QĹ$ɟ?``nSϠYRLeExKFDaj(J(mWT\c+,!gaQ :ʅv+mA-ZweQ?T)X+6/?JҼa6&d\F*FFXޗy[,g~հ.Tl\{6׬z'-Oce nbꆩqUԦ).'8[?epvؤͅ1J2*qPS˺HNw#ޚ  L`Gw78 S9ˌ3&u((f藮qjA'ʩ{)b|ږٝl$yWUQ%|d({~NGh4 vnwhu?Niݮ80^DM~HkA<aTcO}Q) yrCN@DJԑޅBRj͆g4T~VlҷJS~/5)5PpQXB,$b08zts#+MLs% xndlCJ(ZUZ*XHlӔb20@Y9Xލ"glIo^*lh9ti1$ C]H IJq;KxM΄NLᬸ O/v*ma)uVQ5[~ ˨`Qt3+60'Hlp-7,+'LK< ^¢@ r+fv3dRwB}gb'**\GGCķMT=ll\4e(<>\~(I`Q&@Yӷ5)W.SI2?p݉vrxq_"P^ԘqTH@)hᐕ[WY{ \d7k&|߬Q'_9AF8H@70V V&MRm7[N aATz+r*B ;@NW^o2쮨.QCfev),OeNVs"ݭuNQХŝs8>qǭ24km5K'JUO.2v/K{Eg-!北1x]Nc6KTfI{g:=-gFNW}:fVBh dz$۾=>0jDn(w:36Q+ɔXEPBZºȡJ>rf$,}UuԮCrv^ Li8p*lV%}YoŮ8 CVj|#݁_/1B b ,89\܉VڟiqU9N^jgYS۹狃5}Th(EO3e?^˞B$?ɡ1RwwͲ <ieyTkYWmR[cc?:s/TTv!j,t¿@V#g Ef=|m&/3m@ʲ05W!S᫧!TF$/Թ¢S!I͑\xuX/pPpdJ.60Ilr-'GYxzݻ?tlyLJD| @RjU'(tO7-E3B(1.}8j1ޅp^"'YLݡg\OZyfyo#g TeI^W hCKԨi~P7fldPY\ 1'rʛFgTJ(M8" ^.sT{wzxNvJaG>}AFn2- xGTxh:dTApem:,mE>:O"-zGaJ*aoT_7nHF&x@F~T.ףvِ_~Vr+J.o\iHo2pwN%GTR'>埣*/rųZ k!|5L'6.:Xf~ơI&Y{Jbhõ;Y>o͛e}SJŋ6=Ai/?`(Ez?d$h7a\O00Jdh)V)A@,cF)ކ9WQ 4*['+o3K?<Ъhh^1\Mi Tzb!u|.Ԃ0Wp^4M+nA/ jpNlb>zɥVx]yny:Rv!h̴FtF@RiJҨzaΝWIcT`2a ؚV̦9dKbKE.F6I;o*"im ~?] 
.gq͡}mk姁21TTdey_U Yg(=uVrq{(jVҨbnfg}+ZV_rSȭ`y#~deDǕCq,'Nߌ+LjhTXBe5oA`ʣs7Zk^vhyz`cHp)Z:M=x5n+lG iIg):ԭAVW)}u;Ĕl^ɊUsT%/ňhes^wmdc[ h7Wn N>WMTW=qNdM/ڞmU-4ZsK[YhJ2wCEw:Kp˵gdɣ u4GV23y[2,OVwJE K}M8 HbeH|̧2c ~݉2֚+܎`MߤRP$::cns%m;9rKVFTnΝ~V3ދ+;Rr|e"Lh0uZ8o+-F[ ^Ag53sш )zȵ$+mRs5ud*)(]ZXgU6:,ס#0 QDc E50'|R ɒT/( ~?Q\] A(El::`^`bG&| {8s^WռG-Z}s,F=gSUCSpzt0 kD'5Rב l&j7nn:ȸ9(SZRQ}`0(ɲ<:Y❭ˬݾ15s>}J,S.l}^5>ۥwcjO곢U;F12gM1>nYe_/U%ꝟ压l^b&o _4+A/E^/KQ@PI?[ĒoYR+>Bm~:_x9e9 ts ȱ5=('Vwl[ƷmLwnƞLkh:O)jvR}\EΏ)tjg!%ws˲`lwz~\ɓ(-_$ljľ4&Ke4KZyf1kcϬ`W+סH@vsNо!L, dYJ5Ų##\QAPLG-/EG͟GegZ  d[^I鍄co^8NR?VQ,f@R6X2ZLMwcveG$B^Ǿ[cƾxwV)ȿJEzb<3y,s&Υ=a1O m̻7d{Ks\sH ^$=NzXi"ص;w_^lkAy! *H^钂RZCp~wӃI ͦ-U%?4 ܁j6"0O+yEfFz+??]VD5sGߛSFp^Y. Hy1pxZyQXZr sg]QF"۰(!]UXBk2QIAu+̂b:4ƈhBkAh0*TեϵH6:/X.ΞUH֛^Py-jivXo%Su&]ksfewފ$]_'5Ah.{%|*EٙUsVVյ![br,0 x5DLH匽6t rP7ZyҿURh龕.kZzۦ#Ek3y̢56IKπ`~F^z6Kr /=^f ->^߃{bqz[w&R5yآ9`Z՝R-cjMh{}'bW ,Xt7Yn\.m7=6|2UE ge'V^f̸o򊬲R%nPUv8LkL:zyo:4:O4fׂ](]݋z^7]R%J&wS=n78Y۲9FEIBV VT΢ev䳖h~rLGi(VaeRk19"7wEʕ؉=wt4>ț pDlNYM66kPU9h)?a]8)k8/MY׌n %S䇯;7}B8=*{>sqGgqզ7Twɶ4dUku}6ȄN\HB2Gf|VW|VvDžߖ܄79P,6cQ'b램Z]\m0d13\Zsq_o>m9{;Jve)C>I^6cvhƵ! eudNLwT+ڡ^P|+ThMΖ5u}o{C]-M*Բ?ʗf-2R5Q_:uodHD0 C(|U6e7sr˔]od)~(P#*aw8x%Ql\{h>SAp#|=|B&{QY87ZUK\ ) ';@p=L¯9B<5ײжVSCs}J_,uɯcޅc%:mW63xX1B3E_ QH;A{Ujm}xIuܵ,UF82쬚=sT =ꇑ+|y 9 6^Q(UMgt}l>Ioy8 yR0?{-P,1W LDNЊaTS"=-JbcMrw`<5D`ްqf,\:9kVM*fVλsއ~+=m)b\-r~<õp,5ҝױ?E ij ,bZEÙ3dͰ~wN*5&ޖDApme'L| ?0a܆*&r 07w:TcL pfN?MͬԅS֐hOS;6߯}*iPQ`!e (L^x_ߜ3fSoo-/V![Ȅ˵*y:#"b}gӓuU;pt|]m_jOLR_;Vbd.l춎fq BiB6c_悠ڶ2l,sE 1Щ|v\[T A$C:f dȇ"y6". 
'r Cl[~IudЦ,w½uޤtgΑ-dʷbVT 5PvKe##B[tdrq >e1~KO~p2nm._JW0fQ積ɭ^~!7 R#| fSS5.ܖ[yտŚg|l~LM*}\DNzנT(Ri vcN@εY`v.TM^}*  6lŎoZGuiPJn&NYuWVlRh)A/;\rʽeU;3gzumulO$7%{p Tg&l+2뛘q]8'xrC&_M抱lFj@n6qO#YPzOAlP_ LyB;XG ׽23ĺ cr&W4VFpI @!c0:Vr1vt_Q])`2 >J!i0ENV71Y5s^~ *JqB\#~í\9!Sgz?1AWIbvX>9 N@{s.U\ uAU 1t+⎐jLw~'} hEʜ)c&}Qо=/`\႓΄lꩇlҍzM1A zSs3YgR1.AK!XlymlzgMz1 :W5w!3LZshMGS_ sΛx|Ez˜Wu eJrbë Ͽ]Ek{r1.{#E\޳Վ޽F۽{݃fPKϿȐɃJ0[`x5= %Do4SL9y FZXQ,5KN}Hx.ъ9Jෑ }D]e(o#~Čj1pH >#PZp>fg&u4:+_liEbNX2&&al-ca2)TJkyog4rTUP~B 1VA;rVq[' 8@;C`mvXl$iˤS(n%Ǥlk#r[pn hZO*gmCך#i_X9>+7Ͽ*oҠܛ4|T7 7%Js?T{ŨR<]q_3;E̥1!,]7;c2خk?H{AI*jք`< :xlYlk˯vD2s{USsG=n MF;_uoV4Y봂|.#9^.O}V굊W֫q}35FPXC5v*MHQLk5f±<>o[=2,]ks{hT6M(g-72/ý WقQ,WA߫Vv'XeA=FD=3㻨'[TgcOMJz(:c_ٔJX\S6U*nS{u)MZl绊A8A.-/A 5n1ebL%wUߧVauP[pl\^9k~gV6@nX}?] ᔞ)Q;د˹4 ldti򡾴>oz=υ!׈ڡHݥ 1 sS.ڤ2pZ֡$8wOr`G}QQt-fۍ28vqX jXkKViEϝnfpZl9==eq?`y6{15y@/?$;_P.f2޻Z&c A1bzs̒є~^`ew#X32gw5 3 u0r@6GjBzl$ [4WhDl/+Ŕ9ۀ"}:/:aCQYUW0}?vT4dX CI`TR%(f85E[w6١42Rl\g oD79;cHZᱜҿz3J^.[ ?_Zӭ*ߦsE¿d$=61m0m6t{V\:)6JĭrQ >Ҁ:qo,%|zd0,V;#7t+fLf_m 4Vy:_pL uOPkӀ2l(4-0;[ Ot۶׸l٦Ҟٞ_+ׯ!8B #B}2!asB 9b7֣8?ya1ߨI|C8;pp ^4K0v &CSmco!q#F- }n 2oRB'mfsjRV_8?e!.83n56\ӫ ʻչޓTR=I ӷj6 c)xv<0]\S[1g*ek=N ?`K254Я9R!|<(1\?Z`<bN*lvSnNAbut'1aQ8|V^v˃YIY^YQ( V,ۂ˽̺&̽GԐkZ_8ezjraxp ~,WEqhJ1qa/k%22}HJE"Q6,ap@+dy}KT "or[^t:cŚ}]/Zaā ò%.qXss"*brpOT.jtMW$-;sy޴ iE :j#1 F@m65 @0g{g=ާg_#\Nm@C{xH#-dpK{Xz&106#fYFj"-9/H>-HcBv4iC.tOcخ e:Lip-3G#JhD®,  ٵ"8PK*ic>E31؈:!دOU~=!mD3b/_D*Ra x PU.?.p!gglmnet/data/MultinomialExample.rda0000644000176200001440000034446214046050560016764 0ustar liggesusersBZh91AY&SYyO:,y!wAlwWo[w{uZ[vݬk]ۥ=޽{[ءŮ*쵝WǮUzy[+ۧ קomlw;uۭW:X55:R<Wwkz.ݛ{`]tt[]ڪ۽㢴:1}8z_FjyO4&0iTB0`&0C@hh&&F4`*!Th L 2@ @ #dɄ&4&Ѧ *~IC*&  =2 MS42d24`bhɠɉCFCM2O 2hɣ&hDe6 dɓLALM20CL##&4idѣB)!@& hh4@4 D" @A@4h D@i  @"4A@h4h Mh @ 4hD@4@D" A4DxOoNhio ?BD$2#DM5E_@M"h)h&N4 (ʅ}h69c<+M Jj}g&X[Y?^ ޾A4n_bō񱢭+u H`K vHWFEP^Vӱx:O+ToJY'FXhoMc"c_f&5c/ * Dlӊn]2w60⁕x*S7p226)XaUoO:7RSJX)zQȖwME.{z<]'wv. _-@^It~|MhYyZ*_6 (B@f+iC7rҎS"^h\9:x%}l?*BԨj`(0e `^! 
hDpI(;-N],[_3.wYG0*f3P6Ϛ#f,u@X՜0#?X[vRN]╉^r hAۆ% K9'R]GIf[1_A.e= `{ B<*Ё4>r!R;&߲ G!/'A neȃx &e&b{Ƅ%g>73&eV4Ue>vyU:2r'xeir 1MOJB5*үթT׉3_sxudxU֑BcŁb5 CC>ߛ؈><:)ɡ])2ӫ:y&⑩_jϡN"+ur?܏9F51lqrb VT_q}/s)arMjt3|}3_uT_^T 4C.௣Aܑc9La016De;_f6шP}Ћ"d5]rϺ wv[@dm{h>[RYpijK6i6RDro9ÉV%vS%aHҚ ˝oɬ,|usttI5$z"SKz&QޏȞ6 e@{W{ܛ {H~6WnMC*=e.*= dM HW>pLJbrZ ek϶l6B[,W}?)8v\T Rr+*fEMZ,ԓBpza 2$@}V%ytxht?7pG(͞I I}qk)o"⑐z5Cɏ{V~쿰j#uoKhg0 MsaVo{|(`'и,mϰg N"+L-b@^%" ث bA .4ljS+<+cuT{kռVPFYP( Dp3`wi᫞"fWj1mLDt}>o2)_լ='J}/th#.+oC;O g&MOA]^z=|Ys0AgJgCֶx8QsE3;oJ$P@۵qKWƈa$D:tJgP=cQ| &w-D;cz``_e S[S!:)\MP{:( Xل,SpvWuԩzA-j,J\cg9 'y 5H6b^a@>3̷8LSFX=Xt|.X$SĻ!j@̋ܓKFQRZ֒ k3 V&XA kԺx9l![)9Wllk@A@ɠ-]ϙԧ ȎR+hf/e7{,+'Fv|v֖);[!YݫLA浀pxi28{EZ(*PU .~4Kt!Nkk:pNlɡs] Bn_)|We*y@/HB:%ų|-;\F>J9԰B KЎ#1\2˷2מuޤ;2r0ahN)1:b;|x\gQN]+2bE`TV14Mz}эf̡ࣟU 7)W  $n DERڊ"F@#IlQuVXVK|a'NtᄓOV4&~qT 70gma/-[+JqRrHX`ş$a~@i# c~T&6hmQ7m?5/6%6VX;ƼFZ'&> q:My7<tKEbluhn-= 7*da:oV]5c/s`ޥ#-A rOhB+hFpRzKdlrK4~dtkE83M~J/GjqޘYB* ƩqXFrBfJ~N@-\(|Ydj-eȈʢ6c\8lK2d1M qemNj9z!S혞֬ԥ, 6Ռ/"uJtUf.M@UnS]mrPtl.j5|cB!q'ziɿ Na% D"Ө:w0P)C.[=dҹD;`dVRWb3^2Űi.M(k&Bњt/Nh} h)g}܀VJT͟Z\:1h>4lOw4_ 05>Ʒxmt.T#%S4~?M8Gf | \}#N%Ax屧w[žtC|8ve_2!rd?򐈂`F,٬SWX M)POȕ )je~fT0B1 _VǸ3V2giEb_"c.s.nл2<+*Q1 *shT=-aO"/SA +E (T J_nښuMYHnâZcqF LNlaӮ2yux}Q`_I+k"] ͳ4徵B"Fk{/;rKl~iֆ=ZMsq0$b}9_^Zo3ΛD-y1uZUy΁u/v~rt4osdwҗ'lbx#& a6I 9k~N+10/xʸ+7Evnt$xq_ Hl`UJh)05X\i(ǚڸ˥?.2Opli^gA+NOx9 m#c ڂfUX'poɋ{{qHiP6``֕oN;Xv6Pb~~iS3r["Jݻ1g_V'v.w&hYEDEq,̈́6aVGF %dU6:29#Y7D2*`o"L'Inzq2nu" /܄bc7,ըhagN9S-Ƶ|88 E>\Q}m- s. V _rzo}q _kGN9 ( ͦ/&er+_w ;́;^pu\19U79f-]lti+zqa\jWӆ"Ngku0s #϶E*!y/3|Y֮\8ILg&lHԸW Fǡnf]M0C}7+5 JG~T|[3ڗFy4Q'͠vGR켿kG{?0è͌OI\4qS @:&⌁t'Ae FCg-n+9hhu#6K7^YyA\ 6 AW@SZ+Sz񰦕YG=b@IcZ6 9PM޽%s /tc,&>srE*U"JQ͔tf`ͯyiZ05]M0hIȆy(1\jGէ1a Je[mQ,! A[HZ+ [Vr#kJ)o_c$' [Dƾ NJ;0=I'S[%zZ2=bbrҜJ-)ז/l#HV M^(++TEEF|mA!ueb55WS):Ƞ#0`Z:`FJ+^T(kO//f6jW.rs ӈӦa0/-`"Řm;J|W0qx4|Æ\$v13XQ1xfr@p-gO(xi-OPƅ?.[Ëut1LϊN6ԍaG5 P92Ya;{?{d%: DOgC7=` b?,jց⿪ 4'_^,M ӕ~{Bvd!DxS%L2cI#t@f76UF~ ro>D-s=^ 9MYdj}li t; U .V|},m8njrS? 
Ru͠K+7!Kv 絘_"i1#MR0ϼlq ^`l9ez*oz 8%(]|O-`v͟3w3'0إ}n[+托଀o]P>wBsO oj8^+O¥H(lMyi.( KՔ `TݺձGx7n.[i y,厮,|#%I*Px([\پ`p-LLvQɓ+J8Y.v؋@P~ۈ+`;@׸nL?\ >KGZbF ֑[K쎍\R˽Y W\UǶ== rGo` 1<;]u-mY$sprC#"5@L4VZi_8A7M3^clZ%tҙ;w80U/5{=fSk*1 ;W3-1_ov0>FHVG")R74yT},bnU_r=@Vx|@goR)M,g.*_,LԼr GHVֱM>lR ^*I(ꑸloSI4ȸ_2%n89[U*t7!G scE{˥}w''SRY|sB1SZKb.^.ҺG Ϝ=tHϻ'=сXjl!'Wj4:o۰S+ӅnP▐Պ6(~֎wa6̎)͋-q?[DpTVUVވB-ai`RKbɤՄ1 <v(E7fys̻ rE2Zv9wZVwHYT؈B|׹B}JA?lR,4=8|Jۡ!Jǐ!}cySSbq.U0{3^aG\7I]P .{8M7gf,4z~8Q>fۛsߙ) !9G2yz-҇Nl3QWY N_=2H;E#>q1 qvԶ s|!c̈Eyxp )tօ}KG=>pڽ;N{Ŕ=yL( d=p7}6'x>Iao)vY@pr%'\oXܳ9/mBH(d5q(EΞ^ТΝnJwg@_2_ZO6ٝUa9U2 НtD]fk 9T*j44_="x;UV0y@ q|f*=c(EK[?+MyKX>vLRŏ=8[0Cv)ǹ,ZGhb?\Z)۝njhp,}ڮXM觧!iKAU/whr+(Ov,ߝAжMJ.-zH8 xb7p*PyVKvj~?Ep"qYQ@G?C}Ďpi9 =N#&U:5N*c%Op-wso5~"aE{E)6۳5 oHoLkJ11ҚD`f{ :7&޽JhPRZ#=?,>Lb&=_ TJtA7-=zfi{ wo>eu бIqEpGǪR\OBwNӃxb]x/ ! ;C#. CGp]S|#Ec6x"U>d:'8?˭j1\^IEoMtH̀IH:ST}fsZ5g _,H Ga=;ٳjakB*bPrUPX6\nE nGh [^,7~vQK𽿔|`/#k⪄)1HT UT˗S``Xú Q76Xܞ 5h}&h@`3K$^tuV8wp,!>k0l4ob{rM8%9J7(^(<_9r&Qnvʫ{pv_cʈp|M)-%l䲼^ah,IN+3m"R#Qs&sOlrgVGJeWoSh^ʙ-C7ЦRv5kX4*h. tVO$#pO_at\7L^ YU"MAYs->cڃWW>|uW. 8IoWo|Fm"_AB-F  C9r!۾2@ѣn,_6H*Pq/uWF!.5h"TSh3W-=k5>_,۟14SKL2v܌I6AgL{E%7\%{A7 JfQuiET1MJg㍊:C]O1Y<;|̒abzΨ౑_E`$D8( ODj@ YGᬙVms=™1kYھi ٘"nȳc{"._eh"&W^)iJs%$yys QgrXuc̙O2+l{ [s|>gEq^Xlt݂@ oEo瞱eg%NR2mKRf} 9 кm'gmd= ~ºWM:{ʘ, QI{7L'-Gf%K%nˮ͊'3Lyqh찡w3(aH(n[0c͘sM[Џ@JC9EH._x ӻrY{䵺3zAk'zỴZm$ u@nSe1F76@&BţĀn'hDlJlr֗vhƜP#r*n%e 3f2w2M| v"`'pҖ/'__Fk;;( *_8n}˽JxȀS5O6xGLD\C]daz~Ԛb5m`k)%֒Z"w5dbva:D,m)Ro>ǵ/<+ax⽪3S%E 5X _T Om6 N-@^ɋL-зNcq/H))?mRV((;>k3q|e0dt󿏷7 &yf2MN8_EgS%q;@- 8|1>w-oe=0+g]t ycBfPml\3HF?XcS" yTbG)Kq:Vo#HmfD@\rmU+6PTCdSM]ͻ, q[Z/dCWƁt>7j+Y>_jNAo^~3hdlo~MNi]} 0'$6aU[_[VfiOתA*TyoTlDP8)smE>dI{*TZGTقQfu_tQ_/!HEd5w6x5Ӭ?4:_R, %X9|%:ИYOt#$m.iGunCD(weebE[R䢽,ՑL}*VgQ`j>ۍ3SDv 3m!:HZ!i#`㳂4!N$|A{aR#3OI|wAy4| D\>-2N]Uh|WR\"&n{LNkǺW( _BFn)StJ}Ztyr=Z=ɉGƳLOmq}KE椒ݸ9ư̤~zP\{U1G/? 
gCӖ 4{.aYSgwWT!3;}1nBw/pgc؆Pf!ccL=@Kq4^ƪL \=mZbeoˇ1?4ec Yg.:_ɕ8gw?OD}yۤͺ<[ũjl+N 8r~MyAC eNYoe>2sqE=dJa-ÐT('.g9+R(iіjgw6ag.+5j_ ]qo::"`n$ȓv5)C|Ց,fu* Sj'z>o'nS{K:ڰw'Q󲡂n"t&E ilI/Ky[h|JR+Q).!p} #t7mHR\A礐afW Y̚ߴZޞAa1P!l܋_Ï̽BČ[|!&CGށ{$nL ##E0>o')+J a ZbleV*NuK,1!aQyQddEG=w="&i3?mnɨ|񗤚I3 QFM p M %~ѧa2cd;_%w c6Z UW _Op~ˉj dbqotۘZh%3@+k]QbF@-ZyM+NBs|ўmxixm K` P?-<-ٱ`zfp* epVroVgpEBe*5P0E ϭP(-ΔmI>e+X}>Dc 4̼j]Q b6FNjNG7?QwF }yT9.?0=33-Nzcb}]G(l[ 'm""@.Gj@^[߻诧޷I\Mzq /\EIk{уDE.'{}Pzfe2͵#i]oxqk7xyǯ7ˣ3aދn&r:€ן$Ò&-FYpg[:k:`i{{;=Y5`\Q0 Y{6]"rLJ,XFaȁ#b*7]ERy: IǗ5]:dG;;eL24:V2P))moŃ?iN-=ЂXՅGMd "- T?d^&og;e>Tsi~jrbXw 'D#`b#;@ٔnVOkceyO!)\2]vqAk9 IK@;?\<5}mr&qs1\qA;8uF~rv>fJkEϳ]bNq}&,QVn1G(noUJya*<<DD ֋A#joizD+Ħ3v)ͷ};Oȩ헮..uWsAKY>),b[X L#lWRݯz q4jEnKw01qdORBܻfZ& }N'|7T_a ֘ⰬZ|=C֋=,\h BQ3#Y"f03N :oލyJVI['Σy2ܸb_ XڨbElGrqҬj055Kt F=D@W(`]ſ41{[BÖ+\ɧ3x6]F2⦤+"f%<ʤǝ?&]" 2B;rb*Wmӂ!a6EPlL"^ϕuDul2B:5 .aG^,v2tx X:v!tv?S3nQ|'}%jehOBk?qNOj́Bߟhܹ+j?Ό`$~qIW4Cra7j!İ{c(>;d_Tq]tNKe# H0z`FCL:B^RtћYo~VQ UΟ6g}CJ]բ!@ P$"FF|MdcpJ)y<@DB0U7,4of(eNKb,z71~bӺ9ͱjr^X_/%!ٌEFd@Q59@phCzOv<=oOL}Ʇј ç=,I#.bN9̿1[]b\Q_[Mn~/܆C!u*Gҁx"s6&ӀgO33ր;e~]*G;/;fod[-,=C= ǝ^؂ ?S"Ւ \f2;T]TC):gzȫ5<+hU`%xs$ ڣa gv"1*:)n :.`,Vq.ҏ,Q@UA~?C]ٖB?H Z&KL ?jWo6b?n0p5^(1o"JdDTU2iŸXhu¤>]xw:rE|ICP$<?򁔥87L@ͮL@=5 /gȭ3M/ȐGið `a1#0ν%&yn7Z2wPEɒ +%\yG[LWr",{~ZxZb3OTq) \r'+A{|{XeqeblM$d& ,hٳM=SBzZ" a)d'+ FGP/}na&#ZU -򟉠v.WDUWשL"u,?BX#Ӆ cEM6r+8X_I{jXwڞ%9M^x?!< uaKWif=YU VщOVxt_|^30&)] l=MdN[xb|K\ \ϔzulFOfFnSɋPNheV&$`i@b^&SKڠc@ɲQOmΊ<ُ3E+Tɘ:~ waz"@qR\Qj@x%,Vս,ɶr~%DjO9SFoi8ew[pZ?G dw %ծ}TJ1DRF蓞>4[]<{E-gxSJv%mCRAs-oJĻryW P&; jpjTQ8H5"9U \ }/NFNC=JKY֟tv,'Jţ=i pSv;ES l/Rj~T 7Щ 6WM74(ۘUpy+ֽX∻EmdoM=!xQO3;E!^UWmE8T{Wf%sW'UC*LVSS\b ]sj26䈮1lަ  x6FSj"};8mT(Z,7zѺ7BF(2[קU-ZZ%]tKm 804qg_%'?a+ʋIʠۼ8 ֪Ȫ՜ NtN>2t -CZfΰAl*R<`/;j X1ss?A 2I&g\s|(1Ӿ'?ĭҜ'UP B;E3Q15u%/0 [mT33Rԙ$}ԥ t,-5)]ѾpG#_`{v,׌$P3םaܹ '7%5 ^[L![S'V]CQpÏ0?Ea^\ J :V$w6YEojK^qma5(jnm@R @L1ٻ< °_5_aMj9 /Yt LOTbzA:iٸ>*zP[']~`s[qTǍՅ^?K+"]Mvh@!*H|&4s{;p3R}avf~Y=8hjΰv%'@%Ð [0KEhh܌])*qCg8ނ8.H[ u,;*`CV9-4FնĴWͯ4+n7c_{0'걞=2c 
.v~,҃?mbHP0\3kvms;2yFǔ:FsaVf% 5y}J8 :ڕn:Rxx,ד-B9I#ӼX4["/q) O1߶üBUUNjyZ/ ;4G].Td7Hs2 C~ǫ\ZE,;ҭ<R3N^iUשF9"xpSPtQUZ" #(M;r]wݔ91A(9Fi I,fUi~Qb 4R) 6P KAӲQʚʪ ޢC,;B}?76?T{eH.])>!9xq j\zpMh{6ccZ͈FisTLu?TęeP r>SɲJa ~&@êFUKdƂpAWVE.K݉pe!yeJ`e}/^=(&F_>(=RIs8x&mg芍1[g1+s<~b0j]\۾GNqv$~{즭X#=+\ݾY_hix|uHieENaIihOLI?9:IϬn`ePH4Ӡl7i@\sC:6ѫDG AiWhA*0 -˜5µ+op pW9W \^>cbSr.:f$akŗ2- 厨sc6vgt#tkK+dg%F=䓆.CƎ<^cgG F-N޳G^2=smAܜ5s^,煾ĮOԤkjeLrJ{:$ Pe''S` bq(]-4ƖEZ୪+m<.Z%-$v05bT6?M|4Gڱ.^r[h:U\_/ b˱E[ INnz&7ny K#af@.Z/uzUK*LC?M3NSXy+n\N4jeG9\ƺJG09 U3 f>JRFJ= 3 B9!7!DI6PB,l Q#~Ս@[nh+ԃ4*dЄ݆XQjs\3s_]h n:jn|~2Gn>npHf&D4[ɢP[9z<5^bۍ4W :c]%v= 6)%2po"+dP ]膧1#'Cnf?dWpvbǘb/$D]%m/Ob:De38睻C 3D{1 vi&/ֱHILTrm^&^PYMk]'4ep15^;ws~]2/'va\*Q*؆ܯ;]dyI?v˸Z)=Bۃ1k/dw~@ӍQdR!gikC=_fk OsMw[dr N1 {*Ӿvrsqs NV}lKG5QZضl dJ*l}AwflA0wK9d6 +߹5=^YX-ѱN %iQ@hCYoB\VkB3 CT[Ll+ ڸda/h[g A4WGwޤ&fa5]I`Jio5T&X@ZH!KSrlp$2;:XQt\, cy;2LX=mo_ʉ>!~Mnq \DVB7OYаT@>|SgpDX(t`є5D+@,e1#=F4s߬Kc諾 C%QsgagE߇ n ztzZF2G7SX $k=]$ ĬX E#i%;gAX6/S[ŅA PY=d(Jҿ܃*rWx$zN` lIgāԃ/LCU b,a~5@Pwۀg_{ؘw`xC9YR7>8j\k h=ńEʝfHݲx) a|s3B ^9*wG3t"%~jϪ a l5yKTfP2<\"JFFc%SPU^}SsWek߻~5e\6H!U|;pYXoP, ̂m+~RV:m-׊ϻy#JFy/|?Ǩ#lFei`s7~Ϟ*DSx5NMiȢcs$C% ϼ}jѡߝD[G` ^mR#d\sr  oOkkM$eMxsKz;kRvGPQ "|,iMD.zڎEfut)`?09N7wd'7AԲ&-@<=>ۼwGEՂqQ|Ѳ^dM1'f0ztR.~/g.#8CXV}ckw#{^Lz38 68TJ}C I[-ɇ3?(vrMwZY q-y lP&E^Ϋ>Se=^gvT#JT~+ol2=?T(d멪UڕMm2M@/cb&j}V^ˎºw8PT]K bx%e_dR+,À5m$}UY[WAf9vhYNҦbl-wςؿqa~}vT4 q 8I@ AhKջ6,qbG15B> w,l?8A,ya|Q۽5P!u׎w-fRf-|pqvDl¼%Ae 7Q q倸VY x-*6g&y']:=#Sh\@Qs! 
[4uZ`CWVzS A Q-Lq!tMD*'TsOrV0-S'sAq^@ȧy](h&)l&JV}3]obMAGb{ #PTOAo( ]vhhwrtn S$ELF=]0rkeallD-\M6*/_ݼHz$lW\Md˟Hl!; 7ԣ6օ>l8>* 囪 4ץM-٭1B5UEs1k 3z|L+bKI''+eԙs;C4i*qME' X0QB$g,>EIc(DŽ1Ҕsgk'J$]ˮUMx8Q#˔ 1  b l4iŠٵW(¾2ih b;ۂ5ZE  RQ@ XdTV7^M",ZS x'].g"rժcL^6t#1{ !6(=*: XyG?39v.g ]`q3lC?K[T44UN{s0URqwPXJ3AwAi.\Yۡ#{-PmٽFE]LUU5Ia>y m@1qnWU`55!{5I$ce=S藣kx13{TAy:B@j;{{ @J.*ZX@_?Tn1 Pa&@Hv 7fR/<}z ^L@ζ$*eeҵqEQ.۟)Mɑ5Iڜ%!>V&ЫnNlYr ABdxMvpvјUъ{:xQ<)y/o߇,L({ Y dy Nlgetcg;4۫9WPYr ۚ!R \{Kh=$'6Vx'*;hE[EIShŝ}{}9HrS0`R\z6Ne DZH d::mvL^q_jxmb=G&D=z?A!Z*>8M:aPFE9$(x"^d]f)£\LAꤠA| *ne2XGח JԳc~kVE'=) #[];9@q J .ܯ?ЌD1˂WP,mQ^.ƞ;Z>@|9/X*x=R:cj'P9&Vy+}H'b"mAm7W=+{~9iN($nܰ$DN`q;ߠ>dǪO.p$irmDyN\-Q!77 ET 8ɻFd|썐ЭuܹL1諘HGC'> bOqy;fA6<`fiңz 批o!*Y;Hm [ΐ5}Y+ΰ?xhؖYwנּDb@W;gե-_KWfz@IcK,F|:-L;5^4˴ i(̑wi$~ܮХzAϏ/y$" zT1Jw8Ymu4 NdYAJZ*/TaԎ7ڟk#9v?e$X2GWgHeGri$YaaDԷ,k\ovû>͜[wC-sҳF&OʉrPn>N0JCW[LN,{M r"BuF^DcuA^;Qx[^ =%}(؋.Or8ߪk<.*sc'6aU.uCk㵽z3,"*aI$XØ4͇&PM75Y?@3e7[]+?hC~ίdJut%R:e|YoF6fkzc軭bz֬ $L9?DY:U+p^OXp;dh" A"@hh@ "h Y3ydI$v>_UoF @K9c5K` Z* LU`Κ5J)g,r' u}dh@R9' ]0' ~HBs0-_{8u|ģB򱱽xp?-T1ԀiaV|8-,|h\4dUF')B&j0?䆣h"VRaj# db {G6g̴ YS}Mʷ:f&Y&*#kRƄ̦ړVx/ָ w-L|oxK^2pTA*ԟvi\=gօ(8(o^  ,a3ǯXH)~7K!JoD?Kk|j)[&G5zYxi<.AŹMY2ȴݷIW-ϸ#J:c wېRrpZX!j7a;Ñ[h9KU\4孲 2M {83AR{:11Q4!PNQl(0Y'']u9g'+eUiZA|DnmFCS>8O fusyָQ{IV\SxIzMˍ 4?CщmVoCKZՃ @@4   4D4 D@ @@  CͶ4uvKX28ݦ,a}ѵ6eƫ/訂 ޑ&r;!<5 '*%6Sؤ=uN_2&r<yM^We}n{'qIrl(ņWd$tWlTP;1NȥY): |k*mAۡ&hsTQsH;Hj.HJ)Ʋ䏱/*c/ ^b%G >]y2|ǯZ! 
e ,ޅrqJ,='O "   h@\m4D @ M FC?0n\oi^݂lqe`%B^AST24Ŀۀ ڊDz |i&Ra\8ܳXR`.O1+Y!IS释g)#wr1jBW\9+fƊ}<:۔nE=i҈K`Ozn."Dk 2tZl6!ڍ@Wʆp)Ryv!_$v# am/%Gzzd.4m O9[aP'c(Ӕ73q#gjHrUZ&e7_F842}e\pS])5Eh&6U"KٖTNEe8_.3徂'R b5+UkǒQ6TtU]cJsv|Kˢ2f}O_b u7IVإi)T}j,Oj(9-)8\F;X \RCb0!_B#pgb#^ܢX 齲w iClt~xSP S-^HX Q廾/QudD\`" ƺ܄9dfwiS8)z:Mqf3yF1LڇݸS;b\r Ȏv,(u^zPm:3ɚ q'fԱigTS}ێe,T`M0KbOȦHS _b|=S& Жҩuvh eYZ9Hf=LX-Ǻt\aZ+.xt(؂C*F41ǛoU|Ao/38 ~Lj+-F\>hР;u5?&AڠV}'f "qCWޜ+ g6H׊F^- 4%Jk˽FPK-zr1HW,$؈&;?ٞ[qu¶dCtw"8 }o*V 7%Zu5ȺBl^.N|WIJ(U 'rħ; 1q ^= nTW(%f0F:ZNe5ıV;l<5Fq$ ͒NaۅeޠL{jFFEnK b' $H&KQj \}VK t.:ɗ͂%b&4sxO61,kp5: V|º<6jsvkmZڵ1,g;-Ϛ'OEF T4xqyfl޺9b[z4CX 2lSyzX3rU쟔!-$ m"Qd+zE]b'=ccc|t;V?3 h2zKz`4{B PRvk+|==4茍|L @uvnWƛ8Z 5x*uӷK_j@_1)QI2!A$o_!Bzrz 7J4wc3ĪP8-D}vz?40:qOvb1 pnow{$Y|Hೇ%wTΌm[L$_2{Sl#Hqr1DAʴLe;{{=j.@jԀ9mc˥.gR f|=lWkw~}SD$rĴD-2d)%qĭ8ۊowJBj\r=>ȯfGR p YpVa1w =jH}|G Z9Ldz83 &(~@tOkڱwHmh&~˹4,J7W CqU8pSoFm)tTdaL~/(^T_ tƆUc$e&^;fRfjs\jZH]N+ߩ!Ӭ@6{cۚzg|۲5bkWɶvvٻ lM/w2(m4ӌ$"' E;h]/U^R&~ .xN58!:YOc,m;(o>Zp4^iyK(\%ؓWY]ЦZp%%_LYiW( EwnԱȧ ]f&o*ݱh#1~܇hӁ *$a<$(/]cC-^}45΃+)ξq]_ĉ:;# jݐrR/EtW91vN,y l"EUu0 ڎ"d[0r0]svQ R3P8 agxj0YiI=3~/Y9kJ]|6¦réU#o댣̿Z܌mg;E[$N)j a N`׎,8 Hn*Okrי'_ ܨ*2ܕGL5#Hn]2OMv‡==\lh iD֏k1RR D'k(!e\t*Z%ȸ@p^/'Vjr8mߙal?\NcocVm`4{1$tD`D{%r)/'[7$=CEm}ǽ!B,C5qhMhmIZ 6H]~W XTUƎC'jf‚V4tmZܺF H`߆HubELY_搌]` CG }cDq}T!vR!&_J {>I}=^`==gG8bo$(ҼꓶMvU!C0>:;tTT<ǝsr.o-cra6IG#ۇ6c-NـEi+DFL#74咁6Kr C3FnQ.QGżp!M,;6;7%Rʚ3B8ƪ]-Ϻ Dl^+/:3tD!2$߳)אQtVmuDN@ps鍭@ ^~q9c )ʩ̰zɫ^*Rm׫Zеm{&&!2X( @f:H(6OEܣ| Gr"n\cڝCɼ8&trȔy{;iƵ[Eú/|ƛ~,կܗo^ }#Ă=ǵz/|'LڪLi1&9Hۖc;Y[ҽ]IS&tv#V_)QA-_f?g+jߗtP>v3&sSF▹ iyn:97ֈY* | '!Cv({*,,c-{#̨W䳿Gr~jW=D)'hf;Ҁ lRoҘe"iY<?>֞YJs0b̟5;sFJ:* ;Ptҵ+WDDOe.goQsN!wl]Fg[L ao|¤`JL[ >XxGVeuc*:Fz-0".7wbgX&΁e^1^VWd yZQ z-h)yT59aH W(حUF&jh4u7Mm6.4WShl5u_k?'?tOG4g1Ve甓w92ɮRQfG=$^'@դ4|y`m)~}iAt(mŚ1*`[!qHDc|'~WTڪTD_T4H&vV܊a5 Ai*ۂ+UMʻ;q,.Kֆ?Cx>>0RV&Klvm˘{n@igsΞ6Z+mBg_oemܥyTNXlW3`;6oWtcx#Z^C7 !Ŗ)Hr:^,cbcj/}`HWqTtΠh2'b6 U'},1}}Mel<O\]:䅮u^5Wl2UDjF0x|) Ϩݦ Jta^[e_Y؝!vx7nFH! 
yFaR5t!3_2 tl`j(+&YH`]i\+U_h?LeCl3af{jǒYѧ VaLFv{-d(=7;Xk[ߕp}'"1JL@JJ*Z\YOaEVt5,kv"1G7nTK4pt[ 砓skf\yʭ^gweHYcm0իO~d)rhxĽlήOR 鱌WU;`f v_^=,2[a&NeͧI9&78?:H77Dr=r[uW pWw^ש"OJn,BgCLq/bftJԳOzִR .ic VB+2|ҶGU7)+3wg%@c?8G #g8Cn5?'Nd{{ϞKrm8Z.SM#|Y%C;2{0E[̥82¶O69IB }}n٥IRe#@{j=G#X6k#n)8 _5{̓e%1K9#[dE3!Cܘ8O>)pvy<ӬݘSg,;ejODTО&M9H ufnˤK7BxL.#.>ybtK PoPid(DRС ˟V_%WGv t;.R8RO7sr~r/.^k?UOPXnuB +6E0 I:θo_۶7̽MRTR̟/T(op%'^=d{2 g{θ6ɛ-L,Epne¡^4 v<[+D6n(vVc_3MR:Cc+B>0O\lN ZƊ'g"%!g]r&2B;xJmR"'ԖQ J oåSĤsY5HIEKpܹyP6mG.T&{h4o9 uc! /K%b㰗7`=6+2u?#NJvtR*/!?“u=lŹ#1$BUStZ=N괱~M[m6TCI}5*;sU"׍4:Wa Ѳ}dmG#G\`y_xzV.)ydY:VDB$cmL| \캒˜҂QSw d\S\yDe@1γ5K#Xn`wD5jӰd7wtx$Q= kDuqD^paDfwqZTLnk>lzRa;y}> fh<u1x''{xgZøIt,%嗚RqLїsuA$G#w+S+jBQj]r0gyҦl86VFY uQ8C9@@'" dž[C#aT@t h`.=淴k+RUfC;Vo猷U,reԙ^~#wTjb@F`lmMꤖ؍Ӏ_p,qD B){)Pˊ Lnj͉={$*nwh2vaW!G>矃=>X-y ȌJ щkw¬x|`[%>VcDc}aA ;WDO~d&!UlWg DֈA#鯦MKȧ}K( w_+ĚⱁVdUoܨ[Uc>-}R%h"㴭n"%ѩwؖ vZ#b| JTFo{y/ >ʃ,_' AgDeN_}U [ev~OSS42vɛBBF(cγN趾OF0I]dP7V4QA0tGfJ[tKݹ/3/zNxIС*O6˲9tOщa =: 8m$mtL{R_U168ath0Y'i`93=\i5˾4] zO*o ]E@7I ]yh~؅u }څ~ɜ jjGq堸DirƯtRcb8y?)6ڥ%. 
)c@t8A7/_Q/OvrRArC&X,3mʲvpJhnhv49z'M~w;Oi''t [7+Ls6L5JB;[QgmgpdC--XTWڄm/dK,-:@E8!?Y!69MM@d6u+tp˝ιP]ع`"Ukm?z?9 (QXV~WP|QY)DCexH[w\zwv[O8FgNN3ЗL[&6ַJބt~~TYo+3c1grX`;ϜM4LVWdmB ~^ޒéHpq"KI66QX6LoUFtY`1Y ܫ4+CU"ʎnjLzhA'х=N94YGTll~YE75]/,pW42SŴ-irbfݯhWS0@ffhO=z| G6 bW,#2'J%&9cEkK["lck̺3ZQ}yDIlenPj&p#XH:>K.>U3K@ ej{|Bz,ڬXϾ$l%N{xeM.c~_zF%Wf7Sj܎JŠp{&]^iSŮ4jIT#kB'H\baڟ kki_w!>_ƾMktC)zO=mu) w~pH1oE3_G,_Gѳ:kΝ~5τd%ck+B!ɿ!>Ca'^OXSmB!,ƠE)?ϊ" <*OQ!tR$s`5XbsLskUZ 9wFΖKU198rCZޘwK2mOPl]F;*%4319(=Hm~;h1EOA奜 9jQR/PR׭xl Bt{ًIύQc#xyaWT;gTH7E!S5@Bs 0%!=GtZe RϷy4WV^(Ew֤慕?DՕwf5mEӵm=51j%ighwitS=09"rc=uH:ݛqN>Ω`2^vZje.NDt'1#vV;ɅtpNv)ߏ߷IxDL4 _ vjrBhYzē%b+PӊZf^b Ht=mlCY{G O'J' 3`|^VB1ص|_88IW *s<Ѱ7؉D~b#`4If+XH~evWN ni6(IFIN!&bM6}"m@eSywƋ+V;Zdyh]6>1LArLx<raL0ZDkp⑼a`>G(,fּ~!gh / RYηw14d9@% YO Rw <yp21n:$}dƢlY7]^ҞĊj.'p?8p+4Qa~߬XiY \~+}$]ɍLN9tThe {)kbLWb1,φ mW*&Q&4)hǃ#C:9o5*tp/Tuv8,`krZOvx|w0$>'%e3yM]| \G\+,'yHE7}f-kD*I-GO|a39FŐR`MAc9y{=f+HK4ܨՄ=}`Wѹn&$[*Ƅ%L Sf&mf͌\6k'\#'<3ъ<ONgx M$300ےLΣl?e@ UxvCh3{ '*XYZz֦au K_I= PH8+Qkr)G8P:}=7ݦYָTэu|u)L?L[BKoy:4ZŠ=/nl 9ΑO"SȐOΝ%rZoZ4{ܦn@NDx3mܐ.`Z6MկU ^cU S-Gkm ~A_CA>Pd54> oK]0U١%0#oƮ X ^R jTӘ[+lz}0xQTjMAޥj ++tIcӏ]Х$8=r#teo%cnZyfW,%_9B'al%N1-{ai,C$H"{[lnPgO~-1M % %57f $WfהpH#S4InA>ckm(CU_1p@搳pw3>,=D6 ת$A$ Y< 3 D;!3c6X3i.]M+э:r&]QDgz]Yg>-,q67p/W_nf4Nm${˾]+IHɭ)}+c#(ν3s: UX 11rrLIk>P!' 
墓:5CAbQ \W5YY[OFQ_oHU*hA&p6q_\qD,3h$p~WNNIRs+^9k#;N٦Wbm|`c<.mBοDW5tܧ?07 ` ;_k v)j!Iw$0,}Kqݩ\tMaw8u՗7I\[t^q4ЖbHNJ2lpY4>5#Mk'EP33lZ:ҊO$ ]B+ͥ{ s&͆.NЌL˃g<;DX.Ǻms#`NieĊ0δ]1wikys=Aoiw%GAB:).pvG3_e~B<4( rԢ1 <)[eBIid¨>ь؝IdkPGeO!@e-1qufR9k >):\YViKjdr9&s6ɦ*,$Je@2)%Qo |af{[B'HgsXz țWy556- ('=Vt6Gfbp{ݺ$*JE<$g6 |m%pb]: W(K$kMC `'u*G z v jimkw6Y=5F".& }霳O?Y8.@?$ަ:2MI#d<TQ n'nwɃ!F"o6D"D{t_/FI: 5=ٜ<{̙9ϵ2iFƐZۓ|⮷FO WG=Y*<]G5eD<,@Yk|t*aOR_M3/YnpOvЌHwD5F̿g0A tvPn{$ v~R) = i1uh쉼ߴF?7@^'CdA$I+_ 3}TL'$J)Gh"6Wn0D,ldAo)*A.]%KE5 ~>6c [\[d_FEQda]:o@ @#7F /ЯXd/erz3ߞZ|<)@)fugs{@~J {?b_,r|wB(1UB 9;R@ Qؗ&a7k(ک2樐yt.o iEU {e}j{MflEUj`vLHR,_;݆~e"k|sWqY>rlߏ3A-\qq!NT]/nAMCltrVN)ܰkZ 5Iٯ,3X #bs`t\5DaTRƀEBk61n8Z*x4 sIafaii9Hƚta D})v.IY";M?Hu: YSn-auϸ}ngpX|u%*ÁS@!\MJIk^UkJ kdM|SŮA;l=*lF8އaL*Ug8w~ߋ(Fq:gytk4//b \ii1,FX\ }ӛ܎­<C۪JYyqFeGa\⻥$j]/nZ]on B:%h;Y$Y8_eI.jB&3sQPO}Ěu?FXڸ~WlGFqBi>eN:nBz]*Ľ22u*)PZQJ<ȧs;թ( 1W'^L-/L֥SIq˸б[ 0)=yI~?J>xbHn=o [ )g[my&>¥tBTYjӠni&3L)[m>+}8XKj0>tȈc9@QJ&@\ ]hLy5"x/7՜Az+f ԟTrFۘ"bxQ5[s 0LK΢9 ;TpRzirRO|ۢK=>uD(ذ)E:a̾69/g/|l/4+H4Г GTHifn\U)--\$a PN䍞ʔz?-T}gܼ,؋:[VcL2ծ+e$T6j _68T{cx1hu,fo un|{$mydvvQV'_-joBv'Țp:搠>w~{Z%2gsȼ IdiK#hΥ-#޵^GU`Q{si`s XLCM5RBqiHP9^ez]lq:LE%Oz;y=u= jG[>?- 93C@_H*-7 l8*mٺm>'(l Ͳo3[SQj7Jr[e5H &>f kֳA 7sWI~~2VIv>ڕXu{ 5KU2 |u t#AI^ Lib[?Oΰx:!kVgnyjD.,"br>Z췽3=6;IܿQttz cn2;IMlH? dEro O:-q³ GLh}I(Ƹ\?;cf>3+UUʻ6yq(~Sw35 o~j70i-Ͷ=Wv jb+E4Kmw#*T0_ ?uP舽xGo]*ĶG 宏iM8|AxKe[,Ȏf~k˗:?Gn4ܹh\ ب}K>Z ;nddqh?r5|g(OkFuq[uc7Tf;5E7ҵ*n5O~I;<)rnH˪R# !ַh= 7zk Qa{&:DXHΓߔF&Q w\0ZnD5 d\oޢUJn~"4ʹ {J͗\F qlhܕ@n`MNϜxGlD}ɠ.S (Y{ {2wHOl&<[S`)@j5nhU$/ճN^/Qoku|iΌշCl_?bfƤb8n?KHj8[#6U߻d=bv$Q4f#~@oVu=Pvo\/>N#MV&$DUb,MZ47ƝؑTR:\nYTbB|x{?&S3o^!5@t\Ih%!W%^(HI5[y7ΐס~/}IHu[nϚ;E?޷)kU֟%I#?N>]Ym"ՏbXlU >L5rrʂqd;˲Ǻ67eq$@62%/K +M J@?^ ]`[6,}EO)q󥵎Ml?)[ܧX뵛"w\!4C7cjQ#xijYABGpiiXlcx gp hM?FOY?aBKy21ZÇ%%Cܽv74ooX^ q=*NjWڷg*;n>1j/_~94W㰅2ɥk M.6> NB宱yx~-yZR %n02+lW[t«Ys50. 
W\"_m U'$i:p J*sRuEׇ_+ y1Q%VvVhy1g_vPr )a,1'e)_3Z4jĩɭDM*eN@ӯw`$PmL>(Z$֕ Q,R6yM ؾ32LXIᢱut̅pRxʈRD2؜>yb-zanIXg)8l3XøQ?42LwWǨmJY2h) cω9[5nN ʢZ!pϸꝏ3ᤂT&~.Jhϊx塚 u"c3tȨ/t Pg)P퇺RYĔtYͷ鸒!t7͈_W:3a.EunZ"suɝ~(*|C)sQzG֫V{zÿ34~d$R#1]Iy@㰢Z,'saߣ 'VBz?2+ 4;-W%0q >(⸥(C /LV/8L8A2e9N⩎VD洭V6w/&]wy 2˯ U%dX&w17)2vLHBUN_dH{cD5A^20,ЌmSZ_ltCAnhϰj ֔BP[x,7']0]۬a3z#J&x T _{3SmY@%oT՚ 2hXheO>\h9{4$HR<`"[EFx=cH[{ ;XΣ'<BEY:isJ~ ,N-o%/p-MĆfWհh]l5He,7>54sͱ-rbh"&.`SJ@1v3y_3ZP"षcHD PUy#BW?$Sbi[uW83 # dJN,٤WEHzozC[ԇcxJ;_#dT݆ U+׈zbQ{}|;}jLuk9>$o] #nCyFcWz=x'yNg}cSmJLb*Q چ;Ƶi%*)U0 u]+aZm*!~]nu|aJ?$ji"Q:|f ]},:-$ ^ c~H4Xk~]1jYNȷ1|_.sE21 N{]?m}__dnSjE* VݧXfQ3˶Q@G8;4=Hx %R0?c,lLj jnTm#Z!˹ܮ'*a$Ipqf:`.T޴%x] h)CumAs!Μoy&bGiG30 D_l s;? ."aAWE^عNB2[-0kA@Mt /Fl8+]iJLJVoZH4%f^V[Iˢ[vu0G9):xkcRGGEک '4)#HQQ${XgJ櫘h ZY% 9ʃxd3 ~9 &o.K#q7z9]#F3/&`ґ9 6]t8HM('i}! A>Ҫs% ހr51lr}]Or}ʗ?X[zjT#˕&Bߵټa@yLLց,]쥏LOML;j"\=jp_Zag  S+$32V >l4zy.Di %HU߷me=/"8ߧhͺtX@Am' :t l;C(QPzӓ؝?!E@ogq7ٌS'@q{]<TV_t$Kn7j+)kVHj$:(B e4{-åT WAz?^)QxE%C&&ՒȥEnckyC.٪$iёhͫ-eyژW"OPcȎe6=X!7mbJfZ٫&$ah1KuS0uHM={B_-A ^V 7 P&S o}`y%<[ԣo3Dɮ#1 (.w#etuЉ8&!@gkY#,X{l%##LCA) /;J3_gf߶xG# |N'4XI.p\1I[:)\J$V\ׯ^UsIX\UW? 
l?{H& ];ĕ<:]f[Z:ܸǺp>%.Rk7b 7o7n6Ju%_û?6тrng0_fVj.)~ݒ\̆ QQw2tzXZ^v)6uS)6Tl8c^/mWPrLgMI߮~[+H%\\keNv7_kœoztedIFgkiWo^4fwJϋ;&)vr<[uKiϺ!Af֣wYE𽫙c {ȇ B7c[*fN>mv|ddG`^ UK[D^2Uw,D9S`p"G; 0.&krN ӯ/>SvvMqo%% ra>A"(_)Pϸ9߀d&7C9ׯ-u+GCOimGy1酽/и$"a_Y20{R1s.UnR#;7݊a5 (sV4 o};}SzI]F7ʹ}JZdP>&\"1bȎ<=7vR!1; nƴbaٮ k.7%XUclyۈ]+>)w(LEqaK\JjHZϟ M JR-hߔz(gcRXk*u#9 Y|T+$?fA;hz@ (6R;!OA!9ɖY=fqlz02Z^^DHrzҰ<25}=V.v-%FLфV[K .08_ X5LqɄ2qls/Br3KN<>l} h{P^ g*Y"5j@p ?5 R@hVtBJ427hpRm!ʫ֞&ĵnx٩jX/> 37m܇9(O{V0#&5d5a JYbPg؂sMt .wS&Y?]ϴ@Tym&R{.l5j~x *vpS 5^Gs#$7N% ,1ӿׄĻǪk@Dg~8b$hN>=JI=CSi yp[[xkV|QA7#{]|N~Xt)aHXYdwdiK]PȲ9 kPjՎK-`:RnAnZ(pO(^ځt&d 5xv a(‚m~lĝ/oL4TI<|#C䆝REq'88c&bH䀷 Cy{]SI*{ģ TSu{;3@NQ;׷`hU"-F!Gc3x{&cvZ%O"|a[EbZ~;6BMѹfto ia M!X r.)!(D=ï#11,Ǥ+gڇ5 }`[.]2h{R:;T86[iQ{JCZS1Ocѣk4J/C"ȗB㘃 ⅸy܁ YWL=HrVT݆$O_SYN Ҏᱠ,)V{F4ث6XҨ%&p8~to U 6V+)u!27G ޲6AFW p  JJ*ie߰IXi`!Qlߴ"NEi_98wD\KzcPSD+G#NTی%ϖmʃh"OeW/E#3 P+59bި L ^b8L;BS_4́շSjSOpmiJ .C}vUެRs!fb9"n`.5{c F3Fg/š51OH`QMGR@?i|٫J!;4P}mܸʜtraWIFJwN'I)Ӱ"8j]eG˦1cQ9U$:)U<}59>MVL(*7x%nj%-^B`ܕz(vFr w[96S~=mFQm0MNv$},3 b6m*4<%@F܉w&cdWGeScXZ0 N)}x^=Ȋ&*650/ 'ɦ> ,pw:ˉ#E\~ `MFwD$aTzIu5k#rWOqg&wa\'U;|DZ+ҍ&S`v ͘dtgb j-⊑Y4#lfNqvim9܏;_\3F)b0|0(ҧ̃)(هƜʼ9:´L=3(!S-9˽D֥t/u(M4<;-^Kތ;=!,b>:ԋLW+{ rbauqefȥ(~V m#<.6ye`MvQ?X7vy߼skoį% !4)}DMԁt)Q(FgX@)2bwl)^G=r^Fn 8`SL>B~RhZ&j#S 7Wd˧ a 6n?gl8xlGO|p^ %p?OGpEMsD(\Dr+T/wD55e=JLsB*pا6]ʋ!G'>>#V 8$JmUUGXkwLI܀H.h Kx "mʶ{'l%#5@X4lMQdwzԥ%i {ڬCȺQO3 L%ToZ |4G>4on 5XԝfBG{>M!HRμ$YpA&֭.dBqt˜+Y0J|&fzv )\zWL2Zξh3읰G󈥛\g`s< DmKƶ'>h6y ۀ E}vXeXo/!;2-$A|]D:-H 16 WȖ*^% rr !5ܱruN#=db,f-}%VILJmN>̩ rqʭnv&'aHKUe2ޔ {* $3̈́f `^>NFA'CkDc~ds 6w$Ho秌Cܩ셅0k-c{mcrL1ɖVrp22w6FgRhV?gY /Ŭe]>34 UNhW|%;.8qy: ƏV̓YCÿ_C#sԶ+QjJK<>IQAܯz p$eBa4i1 <8`Rseǔl.riSԂ -aM4ԑڋEIn -?~We[m/0ODFBa8/jndm~ozFuXӎU+"/IwOgLpF)р{42CaKkbdmK71ʓ$t (+9Q<ܳe^#4ISzp|Ș˽(oե?ݐ Xf׃ⱪ$f@cN\?7"ʭ<ӣvb:j['bQ= /XUswӑA_mt?0+4wBGGl>oW-~C=YxQ,p QgB24GŔȗT5ߒ( E g:e.qUq+/J >$pi} 6h9Y_z]dYtޡ3If wϢRJ|EOs_vijv 3_wc%b;C^o`?ȉb]]$ _o,#mbn/8+n]5QnxI 3N`j%vr%*ԭkmysT+e@.?a.zQfC tWEŃRmy-6V"5Pz5G9Vŧ=&MuޯAI-E(ynC˴eGZ#QeXDs 
jNEK!W+j!iQ(U*۾bHu5,Wu!}-1pוCb֝BuaƏl0 ȏGfn plFu'$4yυƖԷC_pEP#eelP Z5,)Mއu;v),|$I ۜlY[c~l#RdO{P ڞ/ gAW cYGur]<:FL9-R`MnkS.K gݥt_}zZ8O֊8-| k@*WM ޸NTZvM!°Ϟ>On>D␺n nU|6^<[Xn߭ E@%ㇺ@PS 9 @-ĹvOŠܫ9 "Qz4مŃ;+R^Gtoen/RA\(X }EXrlpvQ9Ȱ'j\^e՝4*Llx9}sH# s-W@|m\){Vt3jN!NʬsH[3#dS5*X5PwrsjJZǢ5BÌ{3vn~jL*His+ yP p,`ЃE_ 4o:`/I9EAw-P}x2RJ:}i"ܝ"8rd; j_8q_̞)v2mzώ$FAuHk@TؓK8BSA"Y^%yZ4noe+yn4*Pמ ڜ`4ë_Ao/u/AٝuS5HIk5!libG9_SRx!ldRFXT^ǐHYm'Љ_ce!=Bst%5 o 3#2:X)^Id䅏3Md5hZ J,\>?) gmu1%#o?eqj0,6< cq)mZy Xش.oN"{0QcpayGl2,:էf.kD(vP'n1ADF;A؋Co=ՙoWnk7ivK]}x:cSnqOo]VUlun*5h}1BÔv%i%,5IhI7b˷^Jz$8 Yy?CyC]ůop ߮ J"Y F*zU%ުbԱiݨJ5|!m}[f opҞEMy/Qkq]T:B(UϯJ7ҥkoQxVe SueP:kf59D=-6J-*7ň~;si Ū-U8"j2<,3 vJ`X)K I>mpY`8D<άBDjB؁F|؍ksgM[Eibnp[,'E BJ '" {9U!X~\Af R(hd"odV|]`=~pnFÍݎ_ā" rBv3 =U ,)seaQK ?~I-Bu]\hK)W̨'=.d! ڔ} ޳I3(^2ph{|YiNx0q.ϥ'3\hz)`Y:]v#*Q7[Do&aauOMr;]z"UT]rX׏/ra牳Be3hW3F|6HSk"J#y`^8Od&lȲNqTymP`Q)ݐ~n.ɒ'Pjy'?%Qv,N83_IPORc}48LAwe~BZTwvxTu'겔㟖*2vubUzJid˓D2W'Ǥ q=ܹl?綵pK!wt&A$JLx„)%G6wAx7%zF^ 68.!pwO#G%{V.uX < zID`;Ɂ']QV n^uuI{]fr/oPk5V3wͣClxbK;zS}QqmZ WC'@<eDNE$9$VI20n1 @\HӀ壗%`{rT3&^Z߂0KTmۚ l7A>K[R'gJζC@  "+ JrҎ83xEsn#[\VMҩsalG*r'lبQ h$XnEt"+R=n/%/}t*P7xà\\id4 jhdjx_{#ir(73ͬzv)0ƪ\2YͮL#h`~;0u)o(̉<};^Z>6E z/T3R+N{oG&˅m0 %L#3V(PaDJ;]gLz RAlёݵ޷l6lqh6=)=AJ34Â],D!oG5L2X4Ձ EAnd<-Q+)c.DZ8&ۤa%XVbDpP'HC߹ hvhV5IGlX~ L;[_@gx:s뮼ϥHJVDѺ4#Ҁ}y矍dw{Ff+iB;5i wA&OcC/,6(0֘S~a˷rtV@C4IRu3^H K# V6ߎQuп"G'q )6TIV0ue>zR;W{Dbojܬ WV{ p6d*k8^bC,-~[CWIǟeq"Q4TώכFCW//jFV@N%'zK0AQf_ei;H^pp8 tʉ4z(p/s ؆%pIyrroi:hcv@@I̾niM_.L_4chj$5f[b5{V$K* SC(oձ3 [A 5}f1ecG SʷtqVi}vY>X\4H?*e! 
/*Fle6n0Ћ΀ Ce鞠%Xb G*˓dmyZr#A{_N1ZATLӬjI'ˎEi1_Q늲d}v7ͷ ~VQ'z߃d:'ϹˋV(U~ŀ{>اs§A!q!L/y+(+(|6PqARc () \1Y0+&\[ W؃uU R!X̽K |_}۝A֢m_QXex6[a4?S+ACC0\,Nnd;/XGr¨xl2SUǷq*S8o9x-@az[69;-s4Sv@({dܯ-Ә%\L1tn\= =;Hӧ6oϿ"ha:3{P?E">ijrHC,Dnd^{@ʞd9KAϣocD ƄT0 {frgQ>t-AC/lbڻxY=*5hzXHQ?[_3ECT :e gKأ7=g*pV݅[(ٮJL<> =zn 7M(PK>!Wl\R ᨬ(+lr@ ZnvBpޮz 1ɔe4SDŎm^fwշhBr15xI_ Q ,yDi,K~DAvb6mZYWyUT7Oc"(_5%lɤ  ^>Ļx,[޾lg2ǃn(̙Q%"uf={/Gd7~LtArz'- ͔7*}f`f\L$cB|p-mx>zρ} #9m& d4մ: hȠ-Uvr)MB$\a)\d_&lKqk+T'._ɴV=QDӾZ%zF \A M#7\ [,fpB*Q'7dyyC'S!, ;D{e'zlμ CG/Cb#yܿfu%OzZOR>f)ZSJ6t3 YҧEEK\RlyGWs[ rV0>WGۯ?H|/9 RAAc#30j$" TMsuNF+פXxg/(^JF"0V]U5NIUxAy_X"G֍i\y;JfuK`G~M3P4|MVppsr qc^{+ZzT'b9@&`IAjH͗Lecͭ &v/AnSTȋ[x7Q~8 M3©1N(bJvFjtpIٳ^suA!?|H&!$Z|Pֶrǝ8>EœaA WF/0V8~ó5*C_.}I>ĉXsk#Guڅlnj&}*r* cYMH==Z̠"37&bOGg\q MR" 1Cw4#S_je̐d_m7qO҄^k|ǡM*qYtN1(K90MIҕTc}$}trTa}6Rl6 Dn+=2pV "Ug*.[9͉Pydq]=|=oN?4gh')AQ^NIG]\7fA|znL#+gCp>9 *o %)ܒĕR~B#G??֏^ RoSֽN/NN*G՝a{a?N2Ln0/VN=J'9E{$b" LGqae:nJvoܘ̙bTu"c95HT[K6IV:x[gaN%7&Dll9~LJ<Ǵ')-2/79eĽV]P/EG5TAᮃLWD[}lv2Yiv]C W.%][hDPb9 1:3Fg(8Ku $Vp!wg˺ԺtOܤ±ew1֝AsTFEJ93~ J^JD\gr+BZ 1ݐ*o.;K'VRAS&#M^bd2ձ%y=,5u`y#zo4Vh@ xW lHq,d'@~|8* QV* kom+b0^ӵɷ6kO@0G}*&0$$'p3?&*wWb(K;,Dglt+to?M FTs[لo,ӟ >F5;֤xnBcrssr՟wx d:rMp6?2Q &pPD_nzkduB*oޤߺU35%u(%KQXƯԤ{7zezlԒæ"eHc]#ED#܋h;MGM\8:#~$4\u#vޔ%  |p&oFeaѷ6 G/TdRǝpBJ#hKM tSJQQ@[?Sɬz4T!<5H.,@ 1'MB= O wCN~s{|D]ry!IE|sg]Z\_]BeF?Q~H*D ~%._[CB+CwS?=MMFs  m.oVMbY9S9oNT}teW1G >VUg "$w>(@GvH5E=#@drJ7\h"Sx)3N 0) r' jy?<4CG :\ixP5Jw iС+1hibԦ-_r(a}ޒ R{t VSLn [0ԻoxX^,4\d6M mRBp>b ;ϖfj*Jń7/l+7aHNwȗ࢚/qJ ]dC;4ʶ RG$ܑt]T}0uJ^7R_h}A}S>%;0 A#jG8D]bu_2ZNFRt\H bĞ+L(s[߁nnǶ#8c{xU)9(#Wӭ8Tat:" xz.62j$VNTWaihH~ .Ꞗ-5>a#z1(wY2:(\8>fRW`pq2\4 gS׽F)<,OZS}/7W,ߑPB4CiALs5~1D3z#T`Ш >##nș-]cࢭE,z($?Og5'Ĺw, %HWSA}*"wgyxQ8R2p2+ `39?'/?o9;ԤdB.rtLwLVt݃Ne꽐 F{um;Jm隬@(|cUtReS ~3n`R}O2ڱ%t  XfYIoĤr'C^eݹ09Txd Iu+Coc]wXXbi.2cܿQNGdA@@D-+hp1ً|ۍyN^VP&,I4\cr "yE,-tUNr=&k-IF\0z vď a4B%M9;"xw&N`GM9gREqFn\WN%ULVHqɎrX @9VcQ'<ńIF ͳz7Km径TgMiiy=Ap<ߊ$#큐yBգ_*^?]9p)΂g%yF} ?r&-m`+>4S+?d6"F>$0x+^¤}l#^ ^'w!*]MV@ۙ*z8S:eݗM$=.cѬEᐎ_JDgmE-YϥR 
oVImv @yi;U_=~-ށJ8 rVڜJ/ɲ8|~b?Qvt0@5+5IpHiX߀Zf>FLʰc53[wv9q/e!8NJ&F`. p|]۶cBByf~e)KIe&8aҚ wc]mJ.Kzd0*mN3HQ8rO`m?vdu,-;G!y(%YS{#۬"Hn :u4YZzyhfy`cǟxWAbce x`e$@ 39CqONO6֧}ķ}}VʈP(2 jH>|TztQF35r+ϵhWcuATrB^cРʔ0B]a?7=HkyGUZY[v7+8 zcd<?Ua!-i"<)ȃ*kQv5" ؛ѓx,CUg\K`Z@al B7iʟ4VPC+6tf |#(,D,t#A;)5 Ss9jY b{bAKnmE3 @P*G]f[&wPfL f ,W(3*G>~d0G ycxo,?>2RͽS}A.KLvcp*dfz֗!Ń{FߨH4TR(-~Ѐܧyxi+fGGi؞Kz6RCy~YE,#  ێɌTi\<5{T2dUQh.É1`:MM5F5Pck(w.$z1uwZ$vQutz@5mOs@IrBgp#̔-yEBq~hH5 LY Z,5Z>o {^[<+zGe$ J:q 0>$AYŕur , Z鑽nu[!lAM>`HsbTbzKCbD8aٔNK.dc_Hc~驲jz՛y23$C5(M0z"J2]Uu`V;j NSNS+O㼊umtM[Uo|Ye$pNykϋN4, Pf Z|Ov;'aKTni5y +/y#E) J~ps(N<(Jv1LNUA6ԱܟC)[KbX`0N -A_3tBghr!vhYб6Q^L2o`1xZ˵ U# œ3䣿$_Y]j <: 8tA.M䪟YHVnN FSbZ΍=nrc#:.&`G}j۫ 5E ى\d:su?0$tn4BͰwc lDmwl]8GU@~u."Ӝqy=hcri ,&W)v0ps4 .c.6SR;vL4Ww!^~ndp_T, w0=N8Z'x@0,aGٗd*a'Ce8b kؕ+$(~AԒCM^;LF3T>Gu;t!s`] trx`-0u}XTz>pX2ꄜC4r4fX&f0ud=SYF.2oA`N2CD8Wj_ Jf_)U+D(ȇ=' !E Zj328ߴSL4q(I)'9P%5!e9A]udq[ňuP!Xdp40+巰mHJ.C+ |R˻@#3Zю}'~Y*NoE/!h)2.bD ֐)kc't، y7Xlkk&?O46C@  0Mt=&W1A `h?UY,An'1㨤AVBU+U^i ˴Oh_,J[WZAƪ@>()ytuLg2g|bDibt<:t[2nG")e6Y_7 d~Wk҃v!{A,~ lx g^cбĦ`CB1g0]uRaĈF7Ls%=l+w$`>v~}BNd? ;[aκ)rfGb6Q L7/XpS1m}Ů򊣄2Nt B>fC we V¯U~Lu8eu "oor ҳ7!A«!jraf%7S*6Emd81!2 *BHHѺi6s1 K:ͧ&3w-EL#2HQ 0&Cc2\OSOi:h?2f+ϋ\bQ>tәtVkmڳs3֣pꢡ!HCXjEx*0ݟNǑ; / v5aS fcVs#/@64͂V™9 Ob0\rpa d`ZM,qiD8+@IXvQn4Lr|nC t>1U;}KQ.aƀР9eViT)!IJ`ZZw9T4Bw|_5-Om|1ۥJ' )rhӥAbwP{fL1ϝ$ߨU IZ5Acch*ӧ3۰ /hTHF(0:a@g+7 J/ 0>aA̕ phj1|v؝){"#5zMk5VduEc6Ҡc3Ģa5C7on{c(b :;K1 TfԨl )_ЪHoAes4Xlt@i.#Uo,ͷLtpF\zkm cmJ0YR+~n =_J!v5"]=@ ꊟ.N g/QiܫuN+rp&)10o(6+ wRᵘ)!geQ_8ےl7N݆QdI7OMd= ObAOUb @G7Q }Kt7D ~4?c39]#ؾSFqeTrE5(tNaϛ\S%COŔ\vJY\dA㮸Slkn*?uDi $Ԅę(Za ϊk9@^jF DIt@Q ,{zE%zڲ+UgXUXvsdإln_c %l|냯<`e#%R*Ax${J?8;Ѩ8@avw2nK/sb{x4_ -TEBX ʼnH/60=϶/CQo'JVL GN"~Vh8JYg7t 6Q3IޗREW#Y Dŭ%d`M%}z~ =0vea?#銈2ZM[p6Y#6IauFE95Ǯ`I8V#uoը+~Ee^/6ViXj12&v! 
,+% 0sZC(~r=TCqycZ7Eo\}yUHI|LIJ?7+󠣠(>&~&ܘm䝑cԭ{a?4$F균A Yb`J@cVI-RsAC_UF.=`#Q[&z CZ=JbqT Ҟ[Vhn§.$cc/1ިb;|0N#*e]]p(lw55U;+a ~w/y!+ pg*%|0S=+U:ݯ6d1`N9#~eW4CюֶV b.$?oFpoΦM= r͗{l:QA'Iow1*(nۆ@qIKK bAA"p3~{B`v0VtY%~6Ml 1zS/+jS?o[To)yБYm>k6tUa炧UI(kxKN3OP>buI{cHsyܦ}==}BÔAT6k)hD=ݜ^R\c_nLtbܦ\'w:_^n"/%妅gtPbk?}7 6(ۢ;V#iES4KQ|MK8OX!HFw;c]oW,̓=6Qݫb!Tc3D=Fɯq*q^*ѻQͭİH2D͜ǵj_5筊qW &T(amƊN3մclZ4%KzP[WWO}o+}$nsjhke'QUgBmxnYIp׆/mdi~鹱ǸF ta? &4 a.MOtZ4CgPy4;+5XW&zh:k9ޝ4el((C_+k6҉j3Mτ" UL ~$- :-rӯfdy:%;4AIr#yI|+,`zGĬnEciBgVN3Muyl N*?'PǷ5 KԴm~7ϼbk;db) > bbRSHvKr-Ϳ.Dfكmhh+yi[H٥|oUl?*|e9{RՍ"*WQ\ūFY%)@vU uX( #lnU (;1dM\ XrWսH)70etf5=E2k .™Nl:2M̖f6䩍m۠/=A:u-ի޻+7Xm;wYX.7}.jjveEgPI-O07[W̃;KfN-{⛂Rzu} mCow>*{3|Nٱ_ΓTGϴL!Bqk0zI.c:oʾB6۹]p,i,mFݘD<6"5M6TQ:E?,I{cϴվ< so_ѵ8E %J\ j߭9i- ۈ)tTl->'xk*L6ڙQSt?N.5qDK[)tL=J_'g ӄx hBÔ tWoH&+LPaJN[8ZcX>5])u&{aU95S}E۶.yD3 ?aѭSb+7OzO"nEuxqhHeQ:&>Jȍw-+F".Q|m;[e aM8?@G %hȨf^N=u^35-B 7 2/mV&]m$ uCh+qzΣ[,<1c$\ P]dD=۸z+Ur;ٷ>S~H:!] 5C,dyٕ gw .oR Yj%xA=锾w jsuW'KYîF:n=c<~=[ʎ~+9jοz:ڝRHur]ߒk z&sw,VĎ4qKrwagd'% V&5ʈʴkeݶ`_9Ƶ8C!FwcGW=&P>B]`̨&U$oLsCMIɣaA[s$8ݶ)WT:o%"-c4"\|9a\'S0g?#z<5:|ZH/GC(ѷOϷ$͖9q G ggcSjzeK'S;oh@r"+(DD\K=JLS80sSS%nI;wY ߅xș OGSpoDsݝ&̎8ʾ?.X'0f߮5ʟhXƊW`|ɐ _*O~ߟ kzs OOKܪݙ; m5C'X7Ga[n1<0y 't <8Rmn̛5U_r"@9.uejFohb]tZvn &bLnAS ;.;c.F3YeM044]xfo[1 ܿ$` sHuMA$~lzev]cZg),ME*xբ<㫇Óި}ߌ2%̫XH+,r]W_j˓`M-!cNLQ_D^elvAP~tJνms{YHC1=fgܿ* ~*.KWFM6;f jnZZU]s7}GCb:/lC +Hzt@4Q"Auw?PvHضFv *錻3tZyjVq |=G 'ؘh*FwYÖRVK'^qqZKK~ NcG]u˴o ZԮtx@ ]\da SQW7|`hNpZӎ{aUl~EtE\ZWfd(bxO5Mы]@ȉ~ˡgtUsc=&rq ďp.N*'L;AB3tsZ@$+@F~Ai} ˾ث|88)mWSD~]⑉dn]39Z yS ĉ?J+rYPEΉ^[c;A݌-'K_޺ 4t 6hG?Z&;Ǧ2F^\wB[ctL_~dȻWg+wZ خzI_?S-`[cRQqZvԆ, d[7ȰWC(_ۓXMl&tm# AB C.m3_ι9*rd8|ƨ#s4!|S=[lPjF$ψeYZ B?W/NyƦ._ixuk0L,YW rǡ鬷S&ЃXs!:٢`XB-"M<'I$xq>mi^3d)e`PŰ=Ɖ%:rE%$VL7'hn xzM2TV"^{dp{;1j^h hFyAwmTǖu? 
?aZw1{5~:#j_nӒGܭd}-1kdzm'|;yndEFͦBdXjsD#!|xM̟X`j8B;,oJat N^U B( }:@|g%d9C*~fu{2_g/G x'cbrn2e4T (ނ4ХKuq3e!Lzr='UFwM,+Vcd q`B"Rc2r$nRd[W*ɄPAf؝'E GJ@K l؇X+K&cw v_bA[xQ<|>@qIzߎX{5WMaɏO똾9w Y\6Nn\&TLDO([:;EbЭ`~RG.:%_`| 1 Fx C^ʪ[ Dˤv$$BR](b3t6\Y:+l5yJcqDR!A^.y+ mJtg2468 ;1[)vGV ܛsW"nM*@C1L 6O8ںXK JQF 7C1=S 1xpTwӹ;U0mf64DqKQ<1! .;Q)-q^[d^ǫǝF垪hG5G]!BUQU+bhҁ JfajvU2nWN͇mT'{dsGcSʱ0!S@ \kjףxKL`pK-(K1LE xKEߐ@=zy\w*6TڍHVWv>,T`Glj͉3[C:׊_qliQ+?`65k7|բ| 'ooD R' d0@Ln MR"։T)݉=ރX="d/CMZ @p޻ IR!$p2R̀LXfwk4$`O&x*dрl>5nhT P y\~rrq6V#5>Rp<(do| SW@bHo.aW:FI/x5Hx0_ /^ͭI[z."aK!@HosɁA3EtyłL kQQ#u/Cq):%ɥ 4&*[t4h"+g?, zz鐈1k^QF: ·Zח +hPʹ V dƩi REn MuSUG| a4UƟQ-UiV~H_ UnYnw {d^(E\Pp?,M\vtɱH<ĽP~mIƹ'ɦ]1vN4x!{?pCl9RxlfpXsDp3HHA iuƏ7~lcwϜ<ԞIvS/CD@p#0=G XޡC=BVwKHjvm vUWJ^0uas ׁD*T"/3Ԗw)" Hn&@)^¡\9N>}Uvͨ,E'%yviTHoLb<=8uy##BQ?CX.bsEqI0fijql#3|M_'sTNz_yA5^5p9Q;f3bwvD9ԳB@4!;Jf |Pѡ _bI8M\&GY.~gT*K1?l(B%GiaD.țdzS@>W\a3y.tWg!:O94)sP9Þ:Fs)ڃZEK7 eGa/L< |@w=Eq#CvnyN_/iYۇ=b\T'6!8A<+I (-~`>0@tb Daҁg3~qxc 1 P|ܪ?ctXwJHR *wBD"h+8W2>М#څ/ 8`]ӊ_^}l4ʿq(Y]†|(+e4sG>N4ܟROZ$W)CCq4L__mz} ReTX;#&c C6*<.SXwRNXlewN_4mm]A1< P/Aco-31QBtc~cg9>ڼGvB*G~]&c٭l8*X)Ӧħ4v(@!mV@d`@%(͡~\ *lG*e6T L!=_@>NԆwc?ރB76|8`.@{Af$8&| Cz-{J=,  A8}"xt:פb/4E{{{jj#/BsLce5=R X

uz7Q *5);Qs1%ONs9}!O:eG r$QFCh1V?!_0ƧIC㎄^ 0`)?p|R*0.,P|1;Y~+)tmvP ZJٌ!)ݠG+!zfu/%َ[:4x5Wi|5{JND sEwD..`0M`$iτIu.yqΌR/Zfc<ɝgY*J7*yF-ᴰ!ױG,'܇р#l>/8չ2*QE*m7؜߯PH9C[_Ӳ@80ē8V&rVV͇h'TI,{̙D #GZ띪'= `A"m2L y%Hi_~movs8t]+u?^~MR ;beApiŁEARۛ sc8,v2)gR%?7 #$!f"Rm&B b>&I6N >tB7|8kxɂKLxa/`Bk@ub>,Q?I9lmI9 *_RZB7p8 l$7(1IWJPn t* )2B *2X#G;U?"=D&;Sy 1T!b!hb} d@*P]"a&n .>:KS !AhҰPC !ccz<pk:2EQ$VE`. M  ho i#WXL8P$Z_^s66.Tw/C+O h2" k-A}! D[|* U 7z h?8ub?¬V%n'Pyz{f)Z4ZX X]+Vr1'cQWqKp|ū;q[sk}9aL ~'FS&!d y6@-}@FSMz6Do:*}JA.D`NZ{+ȿRCCTt@sH .B+~8Xam!16ݸ7aO;yi@3{ly gO0B'ΝՕ-#T l"Ć+`A_$6Ertל]6CSr=BAv-d$JW]%jE͟ hOl:=m5`x>D-w@9E PUoqL{#NUbk``tmf]K;Dz,^%w[D(1^);DY(-=mT´L B:)վ@Ljwog?/0GM.3qeQS'>mJ٪RSJa6,_o&z}eq,*%Sߏrﻊ̮{Дx}ࣰ݁Lʿ[4KvB[.Z[Ƚh,2%ljG o!@by8+xT?SQw;Fx!N?QƲNEo;gF)5V< ~.*VOK߿_3;|gSGcU:2KطI&zp}82r|YCU{vRB)w~Fm r;S->Oe鿤~ _11w) 7n|Hڜi-49%̻O>!TbHz  ckB^{yKq`N;|qf^Euwb2](Q:jZUrd%߅xNUK^`x7aؓ@7 :ެIc60l^% D?{E%*gY%}꽑sq.+ ur:vm#@՝Lᰅc -G&pA&;~Z!L9 In%eo\.Ň嶹'cTfZG F0X1iz!L@I+b7hG1a@zߜWm<зEBfzh711>|=S<._6@Mⁱ8e|]XGn 4>5^{wo=j%(D0\8 z؎F1J=89J&l\s?⩣іz x5_m3ߟw4xi(zw|T` b [ޘ3)eߺ>!Mm=F .A2 MpKZnvG;'MVgo~Z+\g[+10BiޗXVϒ+IZuRͥrPБ >XSe k%d^5A'-lo ؀AK[A]n⮟T@֪یa$4B9 *; GMoYTb_*m=!F"HsKǀH~/|wn-ĎU?ڲ/B5G ֱ':;bo$q{Nx܀]?po7/z`G`O '(-UqR ˔r%̳E#U :X| pTi Q&{]isLqwxu:e~zh$Ta>*E~9L> 1}k!|Sc9UT^ؠ,r|$Lyn0LP|hfPl!`Qoq\PprLw% s5Ռ(tl)˪ɵsܓcŬQoh*r`#Z#NJ~jnۇLy7+(aYY|&Rmm4͡?nUVſ+4Eۤ;7׸J6P-jU -=L,S,'pTqiK/۵ܺlNa-CϙU*a"қ5L|i4-#t.1+&q]=oKO!ͱ/n;][ :-@LJ@8•Ds$|p ’L]SnJʡRAV6d]N u umuWUe3-ȧv8Jq̖YCU1~*Z$t371`$0*J.>=s!2Zvld Wzs5WݿYtZ Z!ߚ)()iPZ3*}[su5ӷe$,NqrQ6)#mH>>/&xo=r QkXJS*l! 
vɛ *-SN42/(}(+mj%|ɸ1 QJy }gkQ,tsb=%AQ3Wr6־c,loTJ`M|@9ϙu) (`ׅĂtt0òSQe+e%ڷٿS_噖" Ƃ4t`i6pz$ &!re=ǧKw(Sn;f"YpU{6N5.#r@g;}~{\W2kK/׋nx}ۨ(3ݯ?U=^sԖntGaM0S>ЭR9%`shTKiknm(>[UƯ*~~/eGNO Xs \*,.J]źQwyd0X̨~+"oePKO-)G '|7\Ǚ6!,8g`O V>i¤Rժ9dz9v k,udaίMHg1!+<#4 ` @r݃T`2K0z?)exnjcc&ޔ DiEǙ8WujhNsV]̽'BOs-VUĞ91 25k` ?9CLJ0[%3;f*DSU 2s3Iz|mmN# &JdIhP!Qigm$G=E WѯWץnӝ 7)U ,߹լڔ::䯾G#HԬV 6ʉbuvY5צZr܍\R\+Nsg6.Mz㵊$y.szebp?>]킢⥙.G/ !{&ʵ vaz(BƌGmox8jl]_Ek︾IB.묋S39y8!Umw9ZLUz4=vwD]s[Vw{Tkxj+j#V3߻/?IH='s`^IPunV4%M+<`-yop@kl {s bUczE|)O0dIc"XqPYmV2Xy:T^g(44bYRUR$J1'[NnFNjN Cfk3{exadbV%U-YaOr[Q<#}/Ҡ^RQ)rX1z# Vv?# FmoTvZCXScqd_}gƖ#"L;zYLλO+uuw3V%;#/RoGgWdMg!ju.2\l]=H\1SfdՀүwm5kҬ+Oeq,K[F,(t= &ly T5rɏeDɻwABesnjSRO̺`/╾rn*i3hn 2$/=ПZV]2$.PXZ?0hS'3K?*Sfosu_M'6Y/w<"jm-ܴm_&ۙI$B$ =5HIG)ߋ爃 >I7KkL7m + ;sEƞd(-b-Z}"-JSm*eNƪջ,g]; "_;K3.h+Zg1ȗ Ic}*k[rGJ|Iv׆1G!y եpqoC='?RCfOρA,cp6xUW>ĢVowj@l_sjΨa BcoH TTnY|[Em0Mz ?fn <7ta4XSrت}bX4&-ޱi**)y$S1?d>m*_GmG1&!֪D607\ 8 uɺkcm!rt}/ gb`{5JMa[=T y6.g303ڟ{Y9>-2jV^wsAtzRL-dk% 뭔F9=o3^YwC2^gwJ4xD}~IN6{Μ-;EB☧eS떭?ps# 1j8ck J ;=2o{irgumsvkʜbmS$2RJdq(dsˡt'y m/~?3h:2. eTlFy`=dTʃh^{7wjA&{U(afm5fۅ >xs w]>k0>Vy Z!fF*`;1@[R_X{' 7 R!NRd:(4 -8 W)d_Ƣyt[0VQUizZ4!M{3KeuC^-~mp17W־ixΞ` >1|~t2lyxD냌ͽZj ]F]Ag"\fE>Dv u^T:YۣΟ[Üf ל 1&_8oQnAs[/Πabzk_BdJL4Cvf@?•mJxsiK^(c)9#5FP5pоcݧLcINɌ˦x-ϻ -A?[RaE3S$qV{2^I"8:ä ?_吼N.3'sկEt:_pƒT~7^V>wĞ~QQtלHZ;\jVԜmMJ߀\2WupvrD|)K"XD9Hk,}Gv_p꼮t릒"?Sru`]T-8=Fm뻷ym%xԮ.yF] $Q.tkKje(]tl(6wYӭ055k)xX̼\Ss̨]?|"b<0Lc `khګ5K3eӬnh\M'p3Ʌy,GU >|x=^x;?{nY|عz )${:xD*z2M Gs6苩gDLz< Ls>ff>SNUT5bM}"һwY6m:$#-sz}z5\},gvg}P B"{;㷳Z8u/U\ɒį)y*DLVɢ<7H\-1?jT2izb AJb27./9F@|RYzE粫l>ߜQ>uýyX`s8oaJ~.=ߟI\؇muϛ4>(H߮fr~RVA!:CeTZލ&Du+oWRL'^{;uh<.~}h* yӽү4URP\囚6y4ؗG>G%V_eб; 62]9]r^%6I%{_av>`iu2_͍SDTڸw]Υ3,Hɬd7:tzR4^|ֽ];h|+qt`~"5! 
,˕גqENݛdջ]E!| =`,wf/;Q8W+|Hbj]8TPI\UZ|6||mnΕXܬ28e;ěJaO=-:kv^3$^ۜN63`S"~mS&p9ILɫ|O?,qu ^·%q&>֫?f/ WV u"2jP;MitVg;j*zNBqb层Ӓ[LThb|5BAW΅K<3\SXPa9}IJ&"={xgN c\ΛZJl2G9G8947 [߫Zej³jFU%V0Uc#Gnׅoi+0&QfytsN߭lKO8'4dk*?$v#\~>qIH,$" h/LC"Xg<@ UY%o&Pgf=ˢ%:V= fYggYc\"Wqlʄgr}9/m'pEd ^ׇհ q0 yu%Ձ&H6&C]9Z .m/NRHžPIFN4^A)Uw5AHN6N=sұbLu0[Zs^=NPnE#w&JuؼŠ>L-"ӎS-Wʹ$rS"UxX57hJԙvqDD`cU(]no,g6_hVZ5CiU9j:¾> &`#ɶ{H6@$Dr0k^/wad_O!er$f0S_N˞sf*1ԒqM>Ao?zߍ,!B_knrbi+ WR@a͟,jcJcx9p2W )5CL!ƙgphجrru6)tF2xA2'%PT>}ʰ(ʥyY 1\f@5GϗFъ[YkDɼ!*X$ߴnpkbfc&T?1&>-뜞1fnѳ R斲숴$k%{yY8=֗:fmZIxjUk~ wG{D+\\1m {UqR^Yf$W-ʖw (HLXt.,x<@(yƼhżJe(V5kѪ],־J vFu`}*q9fG`LnYOdZn%(f2Y:@+@T6Vg-L$DwFE^jo#_e:Α6JrօFݚ+^}pa"Au?|&zv)ђӜm'^rqT`H3󺗅73)j%uԴ+x_ٹKkۨViwl h):JwcwO57uTHe{zGaɇCZF@SdNq%p%5]ڈY#:љ~GdWjq (\F ERTeZC9F˒H,uy%BeB0[@~(NZ!e8|]mmMq,s{.,x[$ z2+Ԏ2l h{u x-q Hd M4Փ J0n]>eDaLMZi՝ƭ]\zc}LN_r^0T(cueW6ܝ ks7& ̎7TpL)b %2d'MEyC Q}[-ϜC~6|S^$iP:]xJbܸouwAsS3ۥ: wR !,ַipScy1s0:6|iZBYJjl<0 ΔƅHd)j/73RSoHi"֠@GU'YsFR-ϑ& ]yKem(jE'bd(#LV4b"w 5N*JP.Q׶:})\NLjMc^| lOWQUM.=u*JW%*fXs&*YjT'O<{!Es܅kB-HcҺ[L p8-hմ}~e7-ǚba| ;"T `_~ _Q=sWs:%AOJ=DS*<*WqY7.; heC  ]Ђfjqڦ'+H]LRU$`O3+!]RMGK˝!jvFV-Q,lVՍ`ׂr _Y {%g0VKE6P c}e v rN sjt¤PiYnDPO,,gGW|g( 1|ds=kוlLajG!r`M~$hQ&)H*wYNˏIXX2vc+k7qhB?E: K 0 pI2ViOs\ݶ. V\ 03'?Ѱ~lYŬ,>beuTQZ$&T)H:wb7rI˒n,K/v|-*B V֭WEi9J2M _ k7HI</1Ȟf9<&2 ^*o/Տc`[N: @p"LjbS0 eW2e]Zˍ(e-uht_p"=Eto>̫XpQc9+R W}-ǩ\`a%Cl6.^bSWGsD-ϑe:g޶"q&.HҾhBԌpoc.~sǹJPR' {1̀LݐȃH~i,< mc݅YRE,=; 0JעnKkI`!Kw\|2M {%:͹ZpŜ˃Tg6\a km{:DkxI~1dm'f_IdQ\AnxF}_Gؕs0᥊ﲦ|3/})g%R;I+ٽgNp3Ad<;jX~_\#ϥqVHoQnY,,ij*eԒ f!HSAedec20 mBut"TD?vu=:45 6O0ԉƆ'o]qjwtX.NV5)^^oIdzE0O|`,n`+.^\tDQ?d.VdgMBR8OhCuW |χܵa}zĘ\Ī\6]xp]15+ ڊK젨 G|&L,ܳl3<ͪׯR;ӲQ.x[mUuur MR5%=2]. 
il#e'r&P`r)YX;#؛Z);-|[{7+p,)R vJ`U\HTWXMdC7 <=SiJ}zwû3~:.'e36P8 [Zã!s6XO%/ ,pOxa4ВB6G!M-6ʛgk%h2enbp6mLm# 9e1l&"P]{#TexS섰iZ4c9m5iIealY¯ÖF׌5b|]4%Lap#YAJ@ hi|W]6'YÓShK d \.r2:{cf\zmHhHVJzq"W-h{ʭ:tӖ󭏠P֞{$>Gt -ّXFI¿?menoemԜCHx{ְ`o}XsZ%VFPv| ;݃ςgoz=-71[rꅲ͠VAԳpX{%[]fſfl,D jfUQK g3g/D](-PkYB ~ z3/ 1w#$|Bz~ Yp͏JKiuM;GiE/JMY͟[]g$*23>*\ xQgF wƦ"L#L.VD`7q=ȟORE\6~_]Աu=/Z` 'Ce~]8L=mEp2՗x3="j B)Bưs6'xoAtF7E$'W_1FvS(ъ"%:i˩H2ˋ|V+l[SZrmpO>4%e&lsN]vx-).߱[DTf )D y*AhU/f!X4܊ >SvF&gP\gG5y;b<ӱbUt,VvBVXJPOƳcv:5^cn!^lAIz!rGrnM f>L Cuޕ}ǵ@yzxT$6yH yvJBtS\V-xTy&NTܻjp3_|27 C|,0j_XK5EWn~jf%- pE}/L}_;9lW.^\@;R>-\r֊=B5` $59ɴaupf:uq=9 RA< *E#10[C2;z=EmQѫ8t }"P"n~̢s0\eB8ʚiʛ|;+Ӻ[D!:5e/t6Guü G"_xqH Ț/DSaꓟu@0%bka\&*v/;ͯˑ BV/3368eTs~dLV}u3 =|d-K:B;9RLi*g|䇴1=nX)`kʷkdkz5u1ئ5iH iжA: Ew Y%kmdѧviYd taʽ$TdUțmլ[VuqJkPßoBN^u5 R,]K$OѨߧ6Z:@/i&T>PǂfOqY |<3q%ijhYC>0 L(f@FFA e뎧UxP VOҕmauY^<_GV|ykg֎E/jʡ+3jJ'n}z{ >Vaw\BMj߆VhS-GQe 3-qV6KMUƓ:eEnMiNx,JDu*\>WW:Pu&_]g}38Sw Юt.A# a%w'hswݎ;aTP`+{P [퀻V_q?&_4_2ӹjnF5Q+Q7pX: /7D,H5^E8 rU=b|gӹmRD?;NY'f)v|/r<\E%v;t0+01.bER%0٘b֨$k3NP(΂5W9=yz *eb~p+ce6V[mV+śg"R4'.r0h0r55G\RT/^k겚/}9|?Ƀb&؏N YKG _[{`%C*x1VTW$g!~21!x8fӊVuGeesWTE]]5ӾF|6]#;evTl}Վ{or[Nc\q+J{ _ܼlɭ +'=y¤8>T&̻u|Y%!}fO EHv+kV]/0ͳV/k o8 hqmF_+&O* 1sx֨#ָ7#*uufۯp#;\n5ܿW)ݍ -9ls 2]& E}dn΢/ \z °旻6ywS3/f#/-tV&t xGhy~OIďݲ?~:) ҤU~ ($Q4Aj @o#/8f2ȝ8P7IiEM =E;^&5z)pja.q|]0t;Ǿ}I)`rJuRH1efxeIƾ`t%-JuYJ.rfvf>d|ۛ`xg.'d7nN_KvY|=I+&ik&g.(ur}ۛ>Jc-^Br~L3\̬ClTU&KX,Vp-9yrIꯋ p~%ɁBW޸UIn̫qw%/J&]_H.F$gwBFni.Zns-w 1O ϧ5esr?3 |5^ qjW.t%+,L-_VITT 2g:y'A\l}䑷xҁf?9t-[eC~Ӣp5}?o0wPMt@!7w%(mʉbRbnkCO5ڼ=1 LO&6T/.C՝|z ͫGm=x̔iNfjA Í*nGQ ђS"2Ct%=-[l"[3Jɮ2EjrwjmG2>e gCQRD3bL'3(1IfkVbQh7F>& &wk|y,d(huz!%'tQ&T8VhK0ػh_|Hq*I!Zy,C}LFR;%mr(A/@@`>*J|ξ^Gf*s>~SV&w#Ƣڧz0k%S[zi{20unvj2,֓UiAY4m( ]!M0+r2 1`f _)if дgSaS=v{oWs)ǃ>JxV{]l*,$Q@HF $eUYF3YRC,\MHH=jCSdq!4 xfQg`*-SYwo81лZl72/  x J: c0-|=ߔ;2(Ndrd*f`wMPbT-QX{ ?F2)\@tԶ(-6F@-(F@9WhB\ɧ e{+5s+j? 
\.Yg$=A|rwEs\}wrHL&D,^inō$@Ua=* 9x?f/9]P+=(@TǷed][Ä۲*4z@m {MQf2fCMh֥4(WMNйP:vALЈ́ )_G.ߟMZ +@_AX;B3eh(ԡdBF+`:Jh0 |0Zh /ބm4 *cjoڄ{3\rSu.EKļTɮ9Bcqso՞-sx N w WVQfWwיHF@M_faȧ/ʟq"n*4 #HYEGYCO[$bd9X"4?F; KeWin\(h4u,ac8= 5U^ q1Hs*HIqrQEvxez5l1怟f؅;=LLi9-'dRQd>p@`~52iGSVZ:Ш(HUԂSj^CJfcNƣGIϲx\FHFY2>ttNv6ga`22T,>9kfV8ڟਨL3!|5񷾳̦~@iN3yٜ3^ aq~)l2Wh&u`J3ҟԽ{-Z0#u[k%2l0'd]էT|%]F F udž\j" -zc+SzP15?ԿfE~ }{> 2k4ɌsΡ R,/k έi"?ѱٷtD`LC5aߐC[ qr<)IЛg߫,Phd\A:}^)MneDV\dL,QGT$CD f҅S_`I2\`\)M؍q[Z $:kҨfPutL%Vn%NA!D(1ޡtb~ %{!)տ:krX04BШe8R޼i/q rm*mrJkdyV˱j_H M(*EQ: Qs+Å9`LYFxm7%Bx8(`A\))ƂS稿1@U {B)j׻Ndº9 !NQxU 4GS(AN򯷿29[3SE6h@\!i2K8 eڽӓ֭QwS/ܓ49m^uJV#2 }PʁwZXa̋f6kYї;Y#ROʑ~wڋ!J Ϗ |G>!k,A]f_#y&o#ܜ i7ݩ-MƮϻ?:6{D-Rm'b jR~B?їN/6_T]?h*zVp6Rt,XMUiwT ?&++Y!Iٗ>1yK jWS+zEt2cϛ2% T,@huEP,+*ݠ~1eɧTLdާg̺ b>!:e0 u{n?-x*7zzn)…Apr loc2* uP3,Əw-G2~O5bjwoֻ4'HĜz]S3A^PFT,,];~U?l,zBQ0IPnvjBۆ!K9e7X'˚I|Wx/4g} Ͼ65CꢩT^Xk2kj{ii}i;+!ӾQ9t7q~h$9zW,b\}󼶟#xOw`|;Ȭ> )/GT wRd\R~Rܕ%#9EVQ(DaQc?6!@J̷bqfS ݶ&?Ws?@#_8O͘Z(H ܂Te_ؓsSڦi7zVtFU<H vS_aEY!*h3h=|S6-vz_譗]gFޫ;W,^+o nìPV=*TP6f(z#ѕ}?j誜m;1i7Wʮ/gcJ+O'7?ͮ`%xLa0wh10 c GݍNc -|f39; lֻܹe5iQc+/jܲWk,e\0JqwtOރ؅d t- /|$^;u9𤰋1;Q S+,xԜ58"!MBpJxxC:_PWv]"ozm]RG@(YeUI͢1^l骲Ãp!X:rLqG"Av ,ea60C:?9Yݣ#W=JiQe-w6:ׇ}I.O&[)EB.!rY~M?-ŽVm]tp n8}I.B[oPs`js kFgx[L 'G&0ٕO-`ʛtd,TZ;qg{7^sʸ$]sJ")rB|=DZ^%D3*5Mf|є_+VPBj+Fd{xc0"~6Aq6,lsgʅAtkiSu԰y :G<jQW7${|P2(O-Ѱ|,jŧOy *r}l"/VFHR&:d:l-;Ke*GX f{-/6Au|Mu"gշ&#g.ԯM IN[,zU;f"pT2 s^yF_i":^FZst*q ]<3$mfˤe|d% ^]rhgYYrꐧ1˒gn6;Q8x 7.N cϽj+ Iwy岟 ,a3zU?l U哘${JmRr#e5C³_׾~' $Mr)gJ'}ݫ V9$_Z>6=XJ-}{xV1=ؚGOcZx.ٵHnl9(Cw' p _ A˾(i;T%(OsL̶,TuT!vX D_:*/ -A#[ߤún8Wrgp}[nC;:6DDH@x}W!icC5j63blZ,?/Jpѓ9?:Kh {J'^oyYk8`乵[&]rz";]8K០;Nc{_}(&Ո"HR98Mk~]Imn;/ e֪Y$2VYaB?3a<39bйltHeF@ZC$92U;fQ/#5R--Bi~F΀$(euUN~_,& M[e*kb [-J!NT4J -~ҥFpjڍ㿠kX3u3iuKKY˻|y.Y}!|Uh<򟩙wDm_E  By٤480+J9@d?B8pÃ6sئbHElg7S{DC^뤿6XG*KT[Րs:(QD{ϑN8.μI=`.|`E?%idO s@ L;BZ a.p]YPni_P ]X|A}$rcSG޿MQc?ʺ.܎)%Ppˡ J)˜Sw~΀^BQwL&dW gϤ2ĩ{p)b^[s$bB%KQpX[ٷ-+rN-IN$`B3_Y#=͖A>MQ}w2㔙N|a%+AVG%k^:_h=ԗZl+?c}y٬'7{T2]Ƅg3޶aS.]Q9 ڤ{v>ԭVmYcVB ob/XD|/ҵfojJh@Փ7vd'/ 
:u辷WeEb\=(O辒yBuf U| 3Gw L`83FգcڹH#͗#o\%-S@=Y@|nm1:̵^2Oaf= WªE}:W80ASs ڜb(n}}.z?׏% m`O2[կ{7eԞA `dK iGbr\^k :9XD@ɸm7cθnZ ]Y-|V?uqXd߮ 53uh{Ze2\:_oaϜ!vZK`t}<. x8tj?"piaR缗uUkg(T7ڰJhшlD.b vxcW[۪[|~P%ςBL *3[LzȢ8o:.s[r ݑ!)DxesbKAQfґ7Ozq(K(7O9Bq/T-_>Sj7x.$~}srR-Эk+&f(!5CڀM&~o/5z{k\/޸Zx_o*o`g%RQYܤ\gn%8qNʟ]IkS,]w1:%Vև0B߈eW}iS9$"2:6}&}jD'#A܀R),ؕ?Dqi떢34_yoCX֎=@R {u4{-Lg*N'w!"g0ϔ7w'Ehk!)0O%% "Eh}gxCjaQ{?*S'} *3;.r&HzsFOxsحɥ6R7ſ`(+o>.Wě9eVu$oBҺ+ >U!ѯ ޳߃8י-ԏZ2S2+=)\s{21"|{ Wn>W@N0z܋7x;j69<6 !%mtl7TIw$S G<glmnet/data/MultiGaussianExample.rda0000644000176200001440000004540014046050560017245 0ustar liggesusersBZh91AY&SYY q0_i|_4}{ӛ{;;=5=[Y/y;wjc\oK׵{uouQ=vywwTGu[4Od MOS4M0&d@IM4ѓ)桄P`FL54=z4ɚ f=4&mOFFLj6C!@ &i40=2ix&&D"cMd h?C#L&0j= ɀ&&'CDe1 L&&L@ѐTD&4D044ڧMmL210ѣ?&C&4O)biDLidFM&1  &dMҧL&zd&iژS=50 L)QS'ziLa3& )6E6O5'& yOM0f#M LD)5L&'& B) &dMM 24Ҟ 444@Mh2 F4d06C  &&04@L 44 Kj`1Akk? l t )AIQ sbFHĢ@@BK-6j[Wgwd 4f gRl1q!hwϵz]? |vl'kl)АbϤykjẠ8ÊRP)iko#s$i|E;EpšRDI^ZT CP E(VhiVzǴj^1iz-&;FWw I-ss;2A6 CR[W2\BuZ=!~?+v,U"nƻ %95'L" m`Y"bWvK˦+ _qjN#Yf_ƵRI;eB6g~F5q;Ji!n@0(r˄ULGDoXV<1yˇ&L`Ԗ[.EɰnYf\3;JW4rS|[>b⟡Dž -if?8a)^B̾7JfkzxjR/a[Vk*Wo욖LBu_Őq)baRZڐWa| N!&wp 1ay=.T %A0]ĺ3d:L6nyS*,R飄 UQNey ኺdFm?,6wN12)4߳G[aX cϖsJ} e42뉾\#`n i sJUA\b9TI^¤)Mqt\dF' ë'햇[/}4q*s:Au 2;CGOZĘ-+sPjfX YGϘ[X )Hk"4nS ֊C>UiVql8Lg>`S¼T}J-ȅkRj(.hAzH%ݏ@FĈu?r|BOM|umEwR uwiƱ*qO&cmhBA3us40ۮҊ]KkJSRcsb-pt5O|_i:T9'( yfȧ0@C@Nvr^2Z),q1qume͙H<oܪ>|LmXN[vM8&+?~ןԇĀVKc,԰4ҾmdQo&2NF0\]sPmq<❘y+2Ɦ%($v*3Je"X2]l%*k5AgYW=cF%|:NiXV*nqꃐ!i\yeme*:KWP/v1ۭVpho"qµ=A..i<vy* -\F4_Y`."U )>-ECLccl ' kD GFT>3szCB.f;3>L ee׶ܷؗ'#R؏Aʬ_4@Ÿ]^}_t~T59g`;dt!Of}BB'DbM瘺{q?J$y?A{q1éJz #|wk]XoN r (Qε_0[kF)j?sj ׌"JIތ]Yĥ d!yMuPuX\E(^`uO\U8ͺ $gu_T5ū60(-M #H|i׉LØWTtD _7- v> 7[vUh=~~gX^9E-3pRn(p'Yw/6yg] +gΥ%:'Xb,=n0?cd|GfK1~0f$n*YYDlFFH| dC.JE{~:-dN v9kUܕ*M7g&MF;,bGܽM ̖W;tfx,EG T;T0O׸ЧϏ܎;N{ģl)zH'XPdfMxUQ[-ʁiKo%Geۂ1BAtLӯ !$Ӥn󭰸4lq@gs)pY KؽWzޛm=NH&1ǿ~SJ8_*lnK:&7 vYjp~%9dn0Z?[VߦG;M6fǧ6we Jᖤe+`Rv&I]1+%퉂,!2Uŵ=d&|WVOZ)#b3k8GAv֜bsN2$}֗cg_ܷR ]T?KvZ,/Vם5G*|_V]`VPM$T^QOy.FM²Ѿmvj9:^7֬/&% 
✣jACZaUJD;nt3 2Zy`/_rW-Ru=,iZ)"|H22' HAה `QrƤdtivtlckeWmcѾKUYÏ '#+HJ1$Wulf/ԻnIiWQE}K:=Иzѫ@1Y ԼYxh$/ „JFh2yShtd3~Ό UEjg'îߍNvg{{T<.*bif}CHpz| ƽkHlvCl͇W.o_rH4l,ShқSdd}rNEǞF7aRd@ >bߓТuu3{vaK#6A➿#5$;YSATҕZwt*hVA ]m&7 jȩ4i!-+,d8 0NsIBMq)SpF+j?C7@1MTJX4h|̐pCPT*:Ʃx}s/qMhJ.`Ńd"Z螆?u^8!:gc%ynKefE@>p%c /Á? ˁh'p=\}Lj4^YQFEkla)@׹_im Q^r$kkBIJJv;}h~jo6=bB-:&LstsY0^|'FB¾Fo}TE#knavFlPN6,6GKҳ$s9+}^bzK} -I$dV-Ĥb/sS' 4;uunGe SO})zL #qkyl_A"M3 `4 f) +K[xvq3Zwa< $ ﬦ[ؖͩDGJzK!ZRG.V/6M7Y*?(tό?~wAck{yp?KC  W2"'\>Awޥq_ wDνtwVMO Hp>FTRg ̊`nmPx#K%;ϵ >U*^l_:+}UXa@K6cW:JafjpR(똎%R hm[|^@?l|VbŋOnn^'xcTp_&a 3G`ZC؏;?!qiku oBl;Hhʫ¸ ;%Acd{lkƘdx%e~1E׷8W(fu-fuo}@ 19N5+X6XaLu)FWe9ar3 -KMK0(PY{kSr_k>`e>,a0C4>s4XCI=φF.?¤5XշsZE XCδ Ez֛Ag/7 >ˋ֌yDf$ګVܭ1:;FQYyo,SUh%M+G|ğgAjE-lqyF`9 cqC -yNѲp.O{s-CЬazJ$ծ+#>.)yqE'0W>`ZqVz!Ê)#ԇfcg)04N{֜r^؀KIq^^&:UJ`*XH)Fa>FfWZxqVp+T\r7y= ۧ}Src_Oh*ՎZU h&'T?5DgPwuĆ0H60'u/ Aq*4y? =nZMQBp-8٦,|OM⋏.T\ZҦ.3;-:fg<9zO;鮥Fyj\ m̡ˣEƕ1ᇥXxb? |!\KΉ_]\M͸cV?\P{Şnl>IVNvpT$;F!M`:/H]w&kw]Qm7z?)l}yS/ZY=x(l\eU]-ӑg6deKW6Pg2E)@e˯M"F`&̩+\탮h Nv[/~%ϑЋ唢8kQ#3Ԭ 2WܽǦT9 !i,E8 v[FfBm~tkרIZI(R^zqUg|A!@x1(Z2x|Gاmi?&#WVH?Yr2ϱ~M9UtK3[&5C/sʇ!)YgEBjggT꨺ ^Ņ:gIw=&-͚HB319r8܋-JRS'"ؘ%#4E /*ω>eJadzy&"]{w-0*q9N޲vN &ubZj<qƻKxa5( Êm(N&3&BN*<򣭴{o2CoW3@1c0o hܯEJyisߌyV%@<7J"JW]Vbയ\S*as+ə"0EWDQf_FRPH}Y9!/(3 O,_Igf-j,VBm C]F_ׇ3Df{2*(D4i?.iu%TM!"q?IMe3C9,‹@ҽeIa9\Q7[k_I%;(S-l+(gV90hP[ῧȅAb;ڇJ7Ùf` A@YiEX3%MK4)~jBA<\x<%8myEXxDMgDRNȑu<ۨ&V~B6bj :MmsٙSϹdд,hoPBC)`Wj%?z^}5vpv(JuRO_n#1r @Ʃkp LKZdYh^Ei+.lfI6wIł6Mn ߞjKx: '5˩ܩɝVsMo^:R۪R"#HX -炱1gF҉,S GA_́7i$=ٙvj(1 "z9[ x_"T5-5M;}X55tw;ڇa r5#U)}1TU_POX.uWChE綗1ݖ"lhC%D鯩M67Cc8iɩtkBzR3d6"ͽd| !o7utCLoI#=웋iƄ戵ܟ8T qtCaPp/reٱ!{J0wBַٜJoA|ɂq_+{ٔOdsb!J.B7E1-}9p-qEi-pd(ոtjF7i)Ix>} bc!/ˬ(ο|43ʵMb֍†6Q|&k_U$QMꝢ:]bOoyED j 37򪞘K?di8C;w1/m 0нu40z%4`u S|'Ύ0͔W¿p/!1$@VJ .~<kMU'n]+Bl*e2LbG򨀻! oG|O6 {ukrYhP$҇DY_审͇c6GEĠq"&~1ވ`@N×1v݌Zg.vWx[nZb >v.5rK#b T%'M. 
%~o[d{9#>9lf uT$;E[jk]SC( ?_%)r8C.¦I7>I?ї+hD19W_!,3x_)Q lw&lΪlyWف81 wVq.T,b$rs4aT)誜6!ug{w~ا"̞+Yo/HW9@̛ԣ^\Ud!SWj ]uWqxS>hlؔԎpfoIY8/v:Kft=BzPi0w#ͦP=Qtq_Ek+.eDžFz g.vMq=TPlT!, o_c賓wEdlϷ*5uR 4W|rQj+Uds~.&7p9ܑ*dtJe:Y4B?qfmӳl+#In";b3Ѽ/6bY#jt[>wg8xgc{xxPݲ{>F$6dspRDnAvvBIJʶCe&O>P?kǣ(mW㪙œGm7U ;vC!`vt0_,A,05M^ĩn'g:W!jU1ui*u{ U =+;XVw]O o~2SG&+ {MgK/J9H|qG,UvI8)gFX1#rCy۽eHl9ܧTjr̒?Jq/-YbrB0*{)\jEe#9F.}-j]\NeMؓ;JQ Q&ϪSz 'Qޥ>KUwDUม+`eE"m+da$9ؽGl'HB?*WYͼ]LlG9<|iΜɯpeaHgWw2>u:=ri.BP0#(#}Ȅs]6Vט[Cן!Rb 9/C6Ws|&{lo0TEO񵃦)SrDi!@7}i۩s9c-牜L%>`ŞUwڅ#܇[jbY7ч-XcpQ= ?޿MF!4,ߵHB/1cP^Wubu6ۤnm'R!zN@Ma+ʛེH:~JoE[e'[fTo ?FӮ˰/2$v '@HT5I)eS@#\k%ު=0Aa}[3_?Z+PhzlaPn+Z}H EM>zjP-fD-k8?Uv5l B8uNwp(0Щ۱MExoN2xb!w>(AYS;CɮFV]g/oɿ8KE4СyNP3N)@ + Kut;bgzR^7 Y{=CSkl= V̘s)n@1?,#_DQ҂c>mz\md_p&y t7aM@@g% '3gTb8)m诵lGx#7D{-:NZ|R5JަЭGdg֢X1sV6>tj11pir$$Fdz"Oh*O;qC0w|IVՀL\IX-s7.Ҫ("]_~ 33/a:[¡p%iZd/seﻔ٬}NCDᰏ mqJJe@w% N􇮟 spCJo<1iptE OtI>Z(yb]g$j~ |QRԝGͱ܋3l-6THٲ^< 9o=Nne&F>Ɔ~ Nq۲}z~?Fcmb.S;VU˻MryI~ޛ\sm3'"B,_׷FtLmU&~+u:7oT虔j:^9E%l!󶌦|Ҳ!>&9l3Zu\.< ^eM0q峐+.3l_/|$ dq~NDQb-̏l?ŋw(&]N3;Pʯx6>oks—jP0]Ve2nTBˈkuvԀwGdoͼvyKҳ.p'I;2Oz[^9(&5\fAΗ6iȖrv J=NKpTPҏSbhXol3E.ri+Dު7R7^(ֻK7}_.͵P ;YfGsf\yg@d>]%lBiua5V-$wtmM3+Tdevy@vs{ *q}~$?zVZPsLŽEI?э5-7ji6ԖHu vN *{[ǐgb`sd *zx]vepVp^x~uLc&xkҢNsGC<Ū?Vw4 Ӱ]xҍ!ZJ<΋$l>o43λTRǨ:z *C:܈NUv@Pތip.;w/Mb:6BH Tvyp]m&q3{eyFb%$Q֫۩N^-<dkV@3|3x2:Sz:kNm{f.K}XTt^";9QouxOa0u}5ړhFz~ ew`\wQ/v~(;`AsH>3ņInǘ^.L U·*j=7tR?IJrU./#V=Qh.RH}\/r*@boG> 3ުB&#xU Ueޚ R=gOP}?rc]x[xtO2Huc@O R&gf™ w3r'0{);x49\*J_k@g:gcv]ɟrXoF4!,O'e!$#Yw鼑ݿ0e(o.( /ixF[&Exe,J WyپVRߝ4c-VDFk˗GlK&#LB+o]F\tpʽ+VZŐS, s+븘>*p֛op$ƳRi;`X|j6qT*b,ʲm.݉-Dqi/eVJשg噭×)ŰQb̰ ,ѽy@yr(woRTOV=ťL ?Z8x|[؈.rƳgZ(.mc*X˼[DJwˋxv6 /ucf*~k@˺Tdss^Y/ x&=$djIc#C{1H "xv=SG 2E090CdӔX%LjYu)rzʆVC>F7I١^/i"=\ClXVtGOtm.ִ 7:cXzw[KTq~/ΡՔGHHg=NluRTG4D;T|C Pb1Ǘt Oy~QN.lt2zbDn;$2>$ cѐ?sjׄez~$$Uq17LjH .:Njwdo|г0ϼs^.?fOD"D:VwY'hǹgUr"i~t7Wd@a 瑃_^߾AI]L.! a=^L۫s­Dn=DgX*-JawLZ6wkHZozupũGP ~K$B!su:vEMQr TV̼Y\ "X0 ?մyC(j4's`RʐC@wIk u*ǩ1䀒>I!̰LV7q|[zRKpbMOQ~uHMfXL/2M`4nQ Ha1vf r4%;GK. 
,~"?cBӷ!m7y8L6QX%0 9D n%+$J”S >C@fZJ- |׻i Ê(HwUN7PA#ѼnGs^y12,` A=4хo;/WY49z4f, 5DjpmrDEg Y$p ]8"*thJξ[ vȣ?#XNhwލ!dg%XA,LPLfHN#@|| r^iD[4<.uV">Xεb" AGbޱPDB("+nS'mJړttփq' @oa+i=ɒӄC.@'/:A0A8}@w6 o^ -^GGf US )l7ɧp#b[I d!Sv1iqx%ݑ^xDe9_ y,+33|Ίk1`a2pu&`C>jI_P[vp% @!>w{P HwNAVa |rW)ӳ2ɗmF9A S?xcsP ۬ wE9W [;\q/Zٴ_PzPŽ5Ib=f8wMx+ۛZp5lO*r94ͮz`-eNJ,*ѠL}mQe=%4 oVƿzUD!6YmUwb}0@o]*}:bf^YWRŧkɭlxGp^YKڍؘ? .۲ە"3R{q6+bNiT}N cIf %+{IֺKe{A늵ށ%[Trp6tiX|t0>;#5ӫe}b<'l3Քe`  !YqqNp VS  M7:?dF{wrE8PY glmnet/data/CVXResults.RData0000644000176200001440000000031212464072752015415 0ustar liggesusers r0b```b`f@& `d`aI%a@6"H߾!6O\ʽ6.w9_9"pu7^+^{Yһ ~@Wu/쏙L7bdo glmnet/data/SparseExample.rda0000644000176200001440000001027214046050560015714 0ustar liggesusersBZh91AY&SY -S >}>rs׷Qܲ{d)&D0SciMԟ)SOқjL?M&b$cɿdG4R;0'/C7ёLFb83È&݂ 89  (1jԧ. N92NuK/A8 T-j^/qs$-5$%_"?<, :IJf GΦlxa=mN@U5YGu0ԫo:S8PKwcQ{# 2o$Ki$-xD%2|hNb*NN39T'N9o TI>- GY2w{ȃXyKyu*>!ܛ,qTp(%yҸ<@|VvPL.+YMuh% ac4I$4=GXpaN%y-qsenDHs50 @7tsZ~s "]$<[=ҳmXiR^Ҝ/0(9ϔ?8&C{۟/Nς( wHg^-ӢQDMqKVS գ`wC#ԑYoxz.DK*H-TzRGG[~nfc{Bs#L_6Ϲ=[h5[r1WȠDp%މ1 E}_ ˏ}'FuC;>tS@AgX@^KJxX,0y\d4;Nŭ]Z- jabLJ޵ ]ۓvZ`Bhp"w,"m}Μ樕0C.V>SLLМ&\)WoALWb.!>a%U[=Ơਲ਼1!̯dn^N/ `Ȥ,!]ߐzAGޞA$iT61n2g{7ѥd9Jn\@<E-$i:LBw nע׿1sȰv׌DE`7*"VǨ©ebٗCN˼ܩ(u3˞E~zA^:^:ᥕ]=OJE+xI4k]i8X+*t9(aU]dVIL';SĹ;J+Ѵ] gk̒!vZ+cl7%q.pTy+Ebя9J;& clV3WA '}FeɃaǥqSo&k"IX<\O3e%𭦳 +/&ڬ/(tY;éIWDg+G.ز2&.y\S> 7IicIf0ßj_[ssUlGw_b1Ҏ)+^fKLim) eɛF Tk/+Y6Hu 6*RGwNF_[yF%l,z.D?ja9,'8i(>a<i`g \Pvk6F|xN MrUpQa:J%^4 ddfiZ>riFJ"\)'bsȳYs0AqTwG051wt.$<>ٮ9Ѭˣ՟d_U$V5%)['P,Z<[XHAӑ}cG1$?aeu,{< tRtAk=[]Ƨ9` ymw !:ϔUCv%e->8[p֭:ݤmI8!m쮏3et5kd#6iB8)Bk7^@ib mY@,žאּheTj&m)?p{EZ23ְM~#r~3s3?Mԍ5b|ҫZK[[:F-`sU>hyU7kzlxT9vV2լy8'M;qRMB>E6kNwʱ ׮\\%1 +\*HxU#5nؗO|8 e}ﵘgEAslvK,yν~t3pdX>'_%u1aHWR~XA-O۽뻽Y}uz_v; x12`L40&#M0F4#`LM CCS4ҡO#'Mi&L&0H= L&ddiSS&'ha15D*~hLjidL #0O@jy !L& M2h?M )&$'lF&*U?di3Ba< 44& LMh4jixM14&GMFLd M4iFɉd @B;rwT/ "D W"!DR!0"1 IL]z٧obh[Z9#,8n^|'K2[8>,M2*qc}^j6>qOպ~4g&!'d\$d8, ?j;;'Ho>@ KH]OưE\K;|J6#St;_JaZ$Ը$rزq1\@h9Bz_UVrĪ;B3}3YKN5,qS 1Ǣq ٤ě 9 GJK6o0 +JE!tѕ󬟍>Gy 
@5GEj͕ws^=e+ZHj /烟`ٻ@|}c%|iڽmnE U%'} qó}%hW!T=%Ka&|[g8̮>[X< iYdVDMVC`m@жiwzxۤoOD&o`K,p| Weo^k!T`Ӥ+"Y@ٛ=賌j(B%.E!5gםJ&S?(,Cw WL涂:mY ץro{ qh851Ehez.p˭y"GPmTDIi@rQ.׷ CvVG>lAdU9hhO1h7lSBFC*K=܌J_!'Dcj%FA-鿽O_j}6Pdڜ{A\YpƏJP*b8cib)X@.[{ "Dz[h?>娵}`'= 1bqEzPhw3޹3t| r+Y QkL }ehSxH}\$87WWM9gă+Ŏ5~9Uk߸6"K irЩ#d+ڝs ٴi5B>4cF8w;KXWai*  uYOҗuaJ4=1H:9ho3a-"69[K 1WGfSJ26&ҁ|`)CmC C|1O-"V:_q*l㖫غMr6F3Pl>Sb=醈j˩=9VZci@p=ŀ0BVsġh-y}qD) 5q}"S&;+U4SD\ (Q7[)X\ˠhߧT݆H싺,10-[K”ru^{T`s} \O٭X0wj֮B9cFmrtTV K?yoሾh->rOd[720c/$.yW!l53TOVsBNA *9k!nkcx ];'jE^mP>/ܢnF/z>P I-zOBnI2 Yra꿅\փ *^g`s&9H@n8E~echqUٿiiƖisCKF?T @b@Ø6D_}>cY+MI7?%mcû%m(ڤਵl,ՊK{#8)f#S)9VTL֟<D чz"UzcTetsrmW鶎e!I\S GaoQb 9$dlcAo:zƚ/ ̆1'Ac[ނ JnmB,\$34HACqTǰDٮX/<߀i4iTݖIo W0G"9RXk7 8)^N7]!cU/Q"({ 8ޗN{g(K.'i>}fo߼D]U"S!wr/Vk<Nk߾%ٸ[voSI>*mʋ#=v9~FlFB,xv} ]])ـ@Wf7BWox:FW0qQ"ޞqCO.gX! HX3eHA_o{f^W;aq A61R>9 {-zxY8<ʋWr˚*l$6>Vo23DCqOzβ^"yisfy=L+C- ;K6J@QGc&&Dk\O2"6}_Ak#nYS$boQyCK6l2pn nX+7/*GׂҡB-QJFj T:9)%ʏv DKu3TSJLDN[["Nu1؇1mH?]OxK<ǰNN5^ԭn`_F?l1r$޲fK7u`} MRf,~84lwh+p쓉NINt+m#װpĹ@vFř֎Tc QpjTS⊷R3`ҭc}Zv- V}G\1Wim6zpw ^dSmǒ}P^ŔD%:ٽ?UuɱΓ׻(f_P@s䗒!yXP1U˷X1 eh.=kׅK.tFW"B1uн _X7so÷!D]mQh;ʞr4&FG~RJN\mfKvYZx6:f~z|ïݣ@E FR&1A0D:ol :{nX&c9Ĝ{/v3N<9#B*=&݄v现1U~"j_4ZɫભdF#?< cca%/ uCQԂ ,*РZJC"`x+C0BZhS3J<) _DfFud6uYz0.D)WP0IlwhΗMO/`|DKϰ<#^}/hߪ1*.x~Z8=MO| ٯ{k&2*˽$5kIALfq!{\2h' 3 e1h`RbYJ|">ϑNl|ַ>Ԁ?\7{D@CD<-esx92=K2qD4Y.*//nT7=@mՇ=3r|< VrUd:(pއwqާ$sKl$/ߡ/}} p`Ž*QMᔢFЋoeEwm e!)~`/.*= [H5b("6^l ʺ ھ.:$Er.qn!L<#B8 +j[!-gx?ٮ3=lveS'F+4*O5'_m2k[_ocۦo}T91dQoP)@Onr"<tѻ~o{!c 7Dy|Ҏ6 ;[O}vA'Ñ^LNkP̉ڀ)X^0riHPnq~!8ظ-_IP/rY\' } (ykCwTߣ \0)]͉"JRmX֎?e0Jp4 2Jd?ĪZv= KI24a4DiSkbo@t/wW}I:qtT﫼0vB=&LGt_iFC,4]̷){U`Nt|Q$'4SLdZ! \S"F?jޓl0'U"<١sW9[- V#VE<&!]֚OBwԜK,4ǫ/..,rQCP)qԚgH D_ b3$ XޛQzmuگk^67DR3mhК/ =ŬR_g9UF~2"C(Wxҵ3hPq#?[R. 
U: BC)NVQse:m{$T ~[=뺵=yn|+|yT{d]>AUhC )Z>ݘϲV:b{gEȼFcg<#J̟uB#MW,/;UaP]op+у~sU|qVCJ'2,MϺ5<0z_4!WP>F c_x[ArMnP+wxV+Xr=J lLUyLd&Do&9}&𽥸RJLH})!G33Jmn;ƉhǓyY1seG<|86U&Ջudmwd) QϽB%G1iz焨Iyߣm [_/#Uw&6g=U^H}U_}fX^KMVA:j{L"+1Ի?g˓UO@Ι0d%ګZÒn##0̜r`|U'ĝ9>Ny_R!(U;rDI&ijNdIg'XQdΜIVOS-}:f٠_Ƕ)P$'i 翞#?W+sCVߙf:6HiB>M 뢘ZҬwsF u 6RL֒76hn`])y0baxPc]f^#P+&|vphsu/atYҞEMR%o2c]Q-d u7щdž:>U\ ?f4b X;Gh!t*7"[`K!mai+a5s)RIcSbDnm&cSd5fE+sn%G_ UXCd֤kEDOhz(x_l3w\=>K Dkɏ;0|fd1rR --4 yTzY0It*8 }0v0mA<61Lo?K *׫tv vK!OЬCQixy>?3nÄ́e^>|+Δ1PCDvG^xvV)&˃N*d4GE `Wj 3\GN)%T-#|ʕPeŚ*ONLL^?7rIBɐ9 HoG۟~[᱒Po}ݸK"ābDTւ۟ptL^#]hBZT/$L SJCaA]qXQvɐU/-C-mz\: Ou6sv`e-y`g5]~B缐龱稛q%QtwWLnc5{oo/[[\M`B}b 2g?+fRfήsǣ`aVviA7C PoWkSlߑj[u[)EӄekiXЮj5F;6u":O\A~y~0{NWH)^Iȇ0-C/q$:p "0RG$'`&VKithzaۥ`syi44mʥ:z6ZlcCV~|?\~"Դ$|hOZ*dOk+>sO#)'߭jjyģ2"t '.k? f勽?]tAB ϕM@ġ6ϛ!G9]z -͖S j?BVW3q q; 0MjYn\3YkjF3~BĎaLN?>naю)?T+둍'xq1 I/X1ۣ''twG2lqO#v3]ϟ/G?[nq`/@7Tҹv}.׌r:H6}<#oR~⬵q;tUOa)<<+k֘Xu^A~)xpS2]6G&ך .K<}xCcЗ'u02Bm^hlFTť&*5ltqUѯ!@TEZvp+y!5&@C^P.=wA.%3̈zUD8NCKK?sP!s8޾\PpKfDbJW+קDfdXL<aSw֚so~6QR\)?}'4 5*G#*5ՄBԕosb?ʸntycB ɭ*P X&vZ[pmߊm@ i-khԝ(&d|MGpVPz_{a7yO$3~d"yy쁷["/:m0)1by7]ޤjC~4wF/eMj*C2~H)2Wk^Lg+y'\i̜S᪻۸r؛f}8w$[^A&Cr&<,k_sMR3J`l "kN ov;>| 5}U<- SJEDUS8.h# k*Ѽ-$ff!N}%zGdPX!ت+F|䑄mc8"qZEIؾxJ^ydv6) mkfu SQUWψu%oK7 yDz4.H~"#t-%V:F xR@ NΑYQDnK1`?"fͅzP0k8l{}tGZ[.jQ=ik|6-WθfͅÒX%8W}z` TG74sӗaШP {3,|| R׉П+#I 7z",%\9#yWF\{Snv=-f9dq`4'u+-6SfsYәD_r?޶wꉘ:2H L-mΞWϻfo$#ACӌnBX~W%`Nvq$Lmn])9#J 17po$u7kHla;L,3Z9<\1׊ȪbRB?w;fmM8 ŃՂljf-uk悕8oz&@`s<6 AI~p_L|U_Xjc :m}z4tz OP3XlD؉GJ2۩~KDn)LF/㣔@2,^:A6uˎ\EuFBwۿ{q*5TiYD`[l#pvw&N;8jIג!8VG!);x8G$H'&Ͼ0߅twa@ p 2 rS:_x4 ^o>a-Ϋ p]gOCd0Vx3H}1ٕ8k&>\J$T?MP0md@*?@*- C>!XM .D hNav9Gܐo. R[jayagd*4M,_ u<$:ae?ل<`byYL`ʗWjpx}\^D&J0C\Ⰱu"Q (d[DaſA{Cg+ hHNx[LuI=[oy,|ؖ! 
S'&zvTH&rvہ2ompp&LD$+Ҫ] e7vLF=Wjn[HB$!dd45nLfru;Jjtjr83Un+SeM:dpQ]ңzDZk9n¦l 4Nid[X.t#{Lp|փؓ(MJ- eD)s D9KAHMdtŽ‚0˼1ٻ95ZGTסAIyrT<Ґ]w>i Qn?.`GD8½V [/8Sx$Q6+ gZN/ur*2?;x'P\kZ  $j)}6&<~+ǝ\KjyD2u/؃ޛ¡>LH>7Ezo,{(+x8uEQJlYOߐdU1Gi_TIUAd{hHZZA1|nvjS_~-:gwۙ%uSa54y&LUhw: yD<۱SD4_y;PG==t*_[W'Qu][!RNQcxb} =ӏH+ՙek-54b85=deI7$~-!/C%S]m> N2miQq\~(:ΧC;}k*.5lA&8P˥[Qx0w9١ Tl-{\gއ4[o!{ayI[D{ A]SsZ sG54Bjmpܒ(-Oڄ®σRt'<5V(@ID<<`]̟*9J}fhkyX*TL7zjA? ڨEyW{{񽒿GpӆKA%e 49~*L STB@ұcp:bɄAn0[z)wW?>i~L_  W $.1w G*\:݇eۯ"F72fxRC.}@GbF x`q2/Ɋt:Rk{95&8Ҫi ? RG1ȩl,/Bٗ5*}g$vX}0{o?m$qPd1k@ˑ6l*l$Uԁw*))hIjnl:7uK9,-&IUYi{ڒ2kY \BV6 smFؐ}bE!o&lKZ&\E uQ}2r@h+.Yexcjrw+Z5Ph*Bg l`\s60{[I,-:+LCz$ \S&p,?ZS&mlEo;[`@z]uXXWyf}cPw@f bu&ڪ/J7ռ x']i nCOirF/H}n3u ޜLYHRmS+NJg(h%|x!;Xek^2_U7 (tgQ̀#kƏ<-6r)oW ,, B1bh()V\VA"3s ׭A. w <}]j| 9ʹ5]NӍèfP@4b#:8R5@.$- UXRFvE76*䣏_Tf~7i96?oIO+aL͑B UIyUS~uu|t n*{ t WW8tOF{"' "TWcQ\z;r>3CyBz_DzvWe]Ќ6gfEQոCB=r`ɛR3ڈVsJz] 1E8ٽSk_ocPH1]IZ|?G2lh6%1T䍒rr\BCR3QᶴQ<{Z}03P䛍ٮa_3`%~iճL#Wn_uƿ]f$zJ8AU7e~RXkxm.rȬ b qr5k7#s|z; w5J/&e1A9\.U:e:\k%8 Ǔ)d^uy=gx@Un> s.yYey&Y?}NR)~I 9v~eQcSG3k# /)~H @= ^P!b]x?G74Q=A#D @-31T྾0< kom׌~~jqidViS&qn>F1ڳY%|nxA\}Z_]LG?y7P湼{"Z:ew0 7`QK*ǩEԈAAvio0GmN@+dB`ǚ -h bkqQyǂO'˔t|rļ54yFu)fْ@<-)HK.i:3l}y4?h%4Yї7C pdPH CZOm"O^0@#9A?U{s ,"w&s{N<:[fx>f>϶؞0%sB~ !A0"Ht}08rzn\^1٫8^oN]{膪R *[S&HKFxJci|1=l^_,#>;>HG'qf >Fb%Qv+tn pyp,y,+Vd8X h4 W1q;O_VzYQ(א 8@*g0ˋ $髋ra`,D!{XDBuD#g9uQ躴ԧۮi}qHsͪ%c 19 R\\e^wkC,2#Z<+a`FL{[26|:ɯNW9V4-DmeW*v162಩mQ( +H艱٧qsaXbN^' YeȦPD"YZ@B#;JqJ,a3l8@3tq$8?;< @}js@Aùw:#f!@/cE=)lѫެj -㡂m4M? cZQiDA3^ y{"jN#[6=_4,-RphDd2,'qQk61L%R 1Ϟ V6ƌoߺ-jRH H*9U 3i5dnۛlc}^7mjpqwߦԦ~fx:Ȁn>i&{<E]ܗ &KY BBn[* [#孩;Mqp(2 s,ml0:[_;ڟ 8~c*OMb+)R vسY wͳӒ2J1HC8AYj03@ȆPc;HTe _CliM]\:]L  %$g[0{t>ZC11Éwc{Cŧ'2SRM`D?xvvfA6+[%*@ iyC5Ճy0GUL!(,_aζ'=82`DDE#RʳXSt&q|QMym4@#JPr\ۘh<` oC,Q{Gjb3ѣX@TQLF+̘& yByH %.d!+=[zk70! )Srɋ> |"<źWմB('sl;7íɘn_ɦ9Vk,$ňq&Q !`D)cwf`@~z;H)`f!576jgwڗ'̥qW ݍ Ӿ(VTQH!Sj hЀHѻ?f Fo O0nJLG>^gNʒG Elm=k_ wkN}Ւ}UۓI{,N'O+ȘWz*#BZWSHf{)RX!IZ214$2ͷKAeرHٚIk}+9X6Ged7Fogh isD#C !'._Q:! 
-c+5ol09֍ 8Vtba6J:ý.^eTxWg:P)}n]Ixch_"[jw_=#zq]͖"o>ɽJfAIkUHb;$iZ6%W&mOt1]'!hҊKvuFawx%\!}b~_G"40͍|B0nTrt)D,g'& N6i﵆_36lye+h`YQ|br-LFXj~u v" ²u:\l ]H,-wM*fgsKpKJurqb `%_K \9E$<cR~ +$0ZUć@j埯KԝD0JАݓu?EFݬY]= lCUm%ی}??϶]0,family="binomial") print(fit2) fit2p=bigGlm(x,y>0,family="binomial",path=TRUE) print(fit2p) } \seealso{ \code{print}, \code{predict}, and \code{coef} methods. } \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/glmnet.path.Rd0000644000176200001440000001476713775432176015061 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{glmnet.path} \alias{glmnet.path} \title{Fit a GLM with elastic net regularization for a path of lambda values} \usage{ glmnet.path( x, y, weights = NULL, lambda = NULL, nlambda = 100, lambda.min.ratio = ifelse(nobs < nvars, 0.01, 1e-04), alpha = 1, offset = NULL, family = gaussian(), standardize = TRUE, intercept = TRUE, thresh = 1e-10, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = integer(0), lower.limits = -Inf, upper.limits = Inf, trace.it = 0 ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. Can be a sparse matrix.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights. Default is 1 for each observation.} \item{lambda}{A user supplied lambda sequence. Typical usage is to have the program compute its own lambda sequence based on \code{nlambda} and \code{lambda.min.ratio}. Supplying a value of lambda overrides this.} \item{nlambda}{The number of lambda values, default is 100.} \item{lambda.min.ratio}{Smallest value for lambda as a fraction of lambda.max, the (data derived) entry value (i.e. the smallest value for which all coefficients are zero). The default depends on the sample size \code{nobs} relative to the number of variables \code{nvars}. 
If \code{nobs >= nvars}, the default is 0.0001, close to zero. If \code{nobs < nvars}, the default is 0.01. A very small value of \code{lambda.min.ratio} will lead to a saturated fit in the \code{nobs < nvars} case. This is undefined for some families of models, and the function will exit gracefully when the percentage deviance explained is almost 1.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{offset}{A vector of length \code{nobs} that is included in the linear predictor. Useful for the "poisson" family (e.g. log of exposure time), or for refining a model by starting at a current fit. Default is NULL. If supplied, then values must also be supplied to the \code{predict} function.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function. Default is \code{gaussian()}. (See \code{\link[stats:family]{family}} for details on family functions.)} \item{standardize}{Logical flag for x variable standardization, prior to fitting the model sequence. The coefficients are always returned on the original scale. Default is \code{standardize=TRUE}. If variables are in the same units already, you might not wish to standardize.} \item{intercept}{Should intercept be fitted (default=TRUE) or set to zero (FALSE)?} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}.} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. 
Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to \code{nvars}.} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}.} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}.} \item{trace.it}{Controls how much information is printed to screen. Default is \code{trace.it=0} (no information printed). If \code{trace.it=1}, a progress bar is displayed. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted.} } \value{ An object with class "glmnetfit" and "glmnet". \item{a0}{Intercept sequence of length \code{length(lambda)}.} \item{beta}{A \code{nvars x length(lambda)} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients for each value of lambda.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{The actual sequence of lambda values used. When alpha=0, the largest lambda reported does not quite give the zero coefficients reported (lambda=inf would in principle). Instead, the largest lambda for alpha=0.001 is used, and the sequence of lambda values is derived from this.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). 
Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the intercept model.} \item{npasses}{Total passes over the data summed over all lambda values.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{family}{Family used for the model.} \item{nobs}{Number of observations.} } \description{ Fit a generalized linear model via penalized maximum likelihood for a path of lambda values. Can deal with any GLM family. } \details{ \code{glmnet.path} solves the elastic net problem for a path of lambda values. It generalizes \code{glmnet::glmnet} in that it works for any GLM family. Sometimes the sequence is truncated before \code{nlambda} values of lambda have been used. This happens when \code{glmnet.path} detects that the decrease in deviance is marginal (i.e. we are near a saturated fit). } \examples{ set.seed(1) x <- matrix(rnorm(100 * 20), nrow = 100) y <- ifelse(rnorm(100) > 0, 1, 0) # binomial with probit link fit1 <- glmnet:::glmnet.path(x, y, family = binomial(link = "probit")) } glmnet/man/obj_function.Rd0000644000176200001440000000157113752553007015274 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{obj_function} \alias{obj_function} \title{Elastic net objective function value} \usage{ obj_function(y, mu, weights, family, lambda, alpha, coefficients, vp) } \arguments{ \item{y}{Quantitative response variable.} \item{mu}{Model's predictions for \code{y}.} \item{weights}{Observation weights.} \item{family}{A description of the error distribution and link function to be used in the model. 
This is the result of a call to a family function.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{coefficients}{The model's coefficients (excluding intercept).} \item{vp}{Penalty factors for each of the coefficients.} } \description{ Returns the elastic net objective function value. } glmnet/man/plot.glmnet.Rd0000644000176200001440000000427013775432176015067 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.glmnet.R, R/plot.mrelnet.R, % R/plot.multnet.R, R/plot.relaxed.R \name{plot.glmnet} \alias{plot.glmnet} \alias{plot.multnet} \alias{plot.mrelnet} \alias{plot.relaxed} \title{plot coefficients from a "glmnet" object} \usage{ \method{plot}{glmnet}(x, xvar = c("norm", "lambda", "dev"), label = FALSE, ...) \method{plot}{mrelnet}( x, xvar = c("norm", "lambda", "dev"), label = FALSE, type.coef = c("coef", "2norm"), ... ) \method{plot}{multnet}( x, xvar = c("norm", "lambda", "dev"), label = FALSE, type.coef = c("coef", "2norm"), ... ) \method{plot}{relaxed}(x, xvar = c("lambda", "dev"), label = FALSE, gamma = 1, ...) } \arguments{ \item{x}{fitted \code{"glmnet"} model} \item{xvar}{What is on the X-axis. \code{"norm"} plots against the L1-norm of the coefficients, \code{"lambda"} against the log-lambda sequence, and \code{"dev"} against the percent deviance explained.} \item{label}{If \code{TRUE}, label the curves with variable sequence numbers.} \item{\dots}{Other graphical parameters to plot} \item{type.coef}{If \code{type.coef="2norm"} then a single curve per variable, else if \code{type.coef="coef"}, a coefficient plot per response} \item{gamma}{Value of the mixing parameter for a "relaxed" fit} } \description{ Produces a coefficient profile plot of the coefficient paths for a fitted \code{"glmnet"} object. } \details{ A coefficient profile plot is produced. 
If \code{x} is a multinomial model, a coefficient plot is produced for each class. } \examples{ x=matrix(rnorm(100*20),100,20) y=rnorm(100) g2=sample(1:2,100,replace=TRUE) g4=sample(1:4,100,replace=TRUE) fit1=glmnet(x,y) plot(fit1) plot(fit1,xvar="lambda",label=TRUE) fit3=glmnet(x,g4,family="multinomial") plot(fit3,pch=19) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent} } \seealso{ \code{glmnet}, and \code{print}, \code{predict} and \code{coef} methods. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/print.glmnet.Rd0000644000176200001440000000263613775432176015251 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.glmnet.R \name{print.glmnet} \alias{print.glmnet} \alias{print.relaxed} \alias{print.bigGlm} \title{print a glmnet object} \usage{ \method{print}{glmnet}(x, digits = max(3, getOption("digits") - 3), ...) } \arguments{ \item{x}{fitted glmnet object} \item{digits}{significant digits in printout} \item{\dots}{additional print arguments} } \value{ The matrix above is silently returned } \description{ Print a summary of the glmnet path at each step along the path. } \details{ The call that produced the object \code{x} is printed, followed by a three-column matrix with columns \code{Df}, \verb{\%Dev} and \code{Lambda}. The \code{Df} column is the number of nonzero coefficients (Df is a reasonable name only for lasso fits). \verb{\%Dev} is the percent deviance explained (relative to the null deviance). In the case of a 'relaxed' fit, an additional column is inserted, \verb{\%Dev R} which gives the percent deviance explained by the relaxed model. For a "bigGlm" model, a simpler summary is printed. 
} \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = glmnet(x, y) print(fit1) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008). Regularization Paths for Generalized Linear Models via Coordinate Descent } \seealso{ \code{glmnet}, \code{predict} and \code{coef} methods. } \keyword{models} \keyword{regression} glmnet/man/glmnet.control.Rd0000644000176200001440000000472613775432176015577 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet.control.R \name{glmnet.control} \alias{glmnet.control} \title{internal glmnet parameters} \usage{ glmnet.control( fdev = 1e-05, devmax = 0.999, eps = 1e-06, big = 9.9e+35, mnlam = 5, pmin = 1e-09, exmx = 250, prec = 1e-10, mxit = 100, itrace = 0, epsnr = 1e-06, mxitnr = 25, factory = FALSE ) } \arguments{ \item{fdev}{minimum fractional change in deviance for stopping path; factory default = 1.0e-5} \item{devmax}{maximum fraction of explained deviance for stopping path; factory default = 0.999} \item{eps}{minimum value of lambda.min.ratio (see glmnet); factory default= 1.0e-6} \item{big}{large floating point number; factory default = 9.9e35. Inf in definition of upper.limit is set to big} \item{mnlam}{minimum number of path points (lambda values) allowed; factory default = 5} \item{pmin}{minimum probability for any class. factory default = 1.0e-9. Note that this implies a pmax of 1-pmin.} \item{exmx}{maximum allowed exponent. factory default = 250.0} \item{prec}{convergence threshold for multi response bounds adjustment solution. factory default = 1.0e-10} \item{mxit}{maximum iterations for multiresponse bounds adjustment solution. factory default = 100} \item{itrace}{If 1 then progress bar is displayed when running \code{glmnet} and \code{cv.glmnet}. factory default = 0} \item{epsnr}{convergence threshold for \code{glmnet.fit}. factory default = 1.0e-6} \item{mxitnr}{maximum iterations for the IRLS loop in \code{glmnet.fit}. 
factory default = 25} \item{factory}{If \code{TRUE}, reset all the parameters to the factory default; default is \code{FALSE}} } \value{ A list with named elements as in the argument list } \description{ View and/or change the factory default parameters in glmnet } \details{ If called with no arguments, \code{glmnet.control()} returns a list with the current settings of these parameters. Any arguments included in the call sets those parameters to the new values, and then silently returns. The values set are persistent for the duration of the R session. } \examples{ glmnet.control(fdev = 0) #continue along path even though not much changes glmnet.control() # view current settings glmnet.control(factory = TRUE) # reset all the parameters to their default } \seealso{ \code{glmnet} } \author{ Jerome Friedman, Kenneth Tay, Trevor Hastie\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/cox.fit.Rd0000644000176200001440000001214714211522451014156 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{cox.fit} \alias{cox.fit} \title{Fit a Cox regression model with elastic net regularization for a single value of lambda} \usage{ cox.fit( x, y, weights, lambda, alpha = 1, offset = rep(0, nobs), thresh = 1e-10, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = c(), lower.limits = -Inf, upper.limits = Inf, warm = NULL, from.cox.path = FALSE, save.fit = FALSE, trace.it = 0 ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. 
If it is not a sparse matrix, it is assumed that any standardization needed has already been done.} \item{y}{Survival response variable, must be a Surv or stratifySurv object.} \item{weights}{Observation weights. \code{cox.fit} does NOT standardize these weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{See glmnet help file} \item{offset}{See glmnet help file} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}. (If a warm start object is provided, the number of passes the warm start object performed is included.)} \item{penalty.factor}{See glmnet help file} \item{exclude}{See glmnet help file} \item{lower.limits}{See glmnet help file} \item{upper.limits}{See glmnet help file} \item{warm}{Either a \code{glmnetfit} object or a list (with name \code{beta} containing coefficients) which can be used as a warm start. Default is \code{NULL}, indicating no warm start. For internal use only.} \item{from.cox.path}{Was \code{cox.fit()} called from \code{cox.path()}? Default is FALSE. This has implications for computation of the penalty factors.} \item{save.fit}{Return the warm start object? Default is FALSE.} \item{trace.it}{Controls how much information is printed to screen. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted. Default is \code{trace.it=0} (no information printed). (\code{trace.it=1} not used for compatibility with \code{glmnet.path}.)} } \value{ An object with class "coxnet", "glmnetfit" and "glmnet". The list returned contains more keys than that of a "glmnet" object. 
\item{a0}{Intercept value, \code{NULL} for "cox" family.} \item{beta}{A \code{nvars x 1} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{Lambda value used.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the 0 model.} \item{npasses}{Total passes over the data.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} \item{warm_fit}{If \code{save.fit=TRUE}, output of C++ routine, used for warm starts. For internal use only.} \item{family}{Family used for the model, always "cox".} \item{converged}{A logical variable: was the algorithm judged to have converged?} \item{boundary}{A logical variable: is the fitted value on the boundary of the attainable values?} \item{obj_function}{Objective function value at the solution.} } \description{ Fit a Cox regression model via penalized maximum likelihood for a single value of lambda. Can deal with (start, stop] data and strata, as well as sparse design matrices. } \details{ WARNING: Users should not call \code{cox.fit} directly. Higher-level functions in this package call \code{cox.fit} as a subroutine. If a warm start object is provided, some of the other arguments in the function may be overridden. \code{cox.fit} solves the elastic net problem for a single, user-specified value of lambda. 
\code{cox.fit} works for Cox regression models, including (start, stop] data and strata. It solves the problem using iteratively reweighted least squares (IRLS). For each IRLS iteration, \code{cox.fit} makes a quadratic (Newton) approximation of the log-likelihood, then calls \code{elnet.fit} to minimize the resulting approximation. In terms of standardization: \code{cox.fit} does not standardize \code{x} and \code{weights}. \code{penalty.factor} is standardized so that they sum up to \code{nvars}. } glmnet/man/predict.glmnet.Rd0000644000176200001440000001270014211522451015516 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coef.glmnet.R, R/predict.glmnet.R, % R/predict.relaxed.R \name{coef.glmnet} \alias{coef.glmnet} \alias{predict.glmnet} \alias{coef.relaxed} \alias{predict.relaxed} \alias{predict.elnet} \alias{predict.lognet} \alias{predict.multnet} \alias{predict.mrelnet} \alias{predict.fishnet} \alias{predict.coxnet} \title{Extract coefficients from a glmnet object} \usage{ \method{coef}{glmnet}(object, s = NULL, exact = FALSE, ...) \method{predict}{glmnet}( object, newx, s = NULL, type = c("link", "response", "coefficients", "nonzero", "class"), exact = FALSE, newoffset, ... ) \method{predict}{relaxed}( object, newx, s = NULL, gamma = 1, type = c("link", "response", "coefficients", "nonzero", "class"), exact = FALSE, newoffset, ... ) } \arguments{ \item{object}{Fitted \code{"glmnet"} model object or a \code{"relaxed"} model (which inherits from class "glmnet").} \item{s}{Value(s) of the penalty parameter \code{lambda} at which predictions are required. Default is the entire sequence used to create the model.} \item{exact}{This argument is relevant only when predictions are made at values of \code{s} (lambda) \emph{different} from those used in the fitting of the original model. Not available for \code{"relaxed"} objects. 
If \code{exact=FALSE} (default), then the predict function uses linear interpolation to make predictions for values of \code{s} (lambda) that do not coincide with those used in the fitting algorithm. While this is often a good approximation, it can sometimes be a bit coarse. With \code{exact=TRUE}, these different values of \code{s} are merged (and sorted) with \code{object$lambda}, and the model is refit before predictions are made. In this case, it is required to supply the original data \code{x=} and \code{y=} as additional named arguments to \code{predict()} or \code{coef()}. The workhorse \code{predict.glmnet()} needs to \code{update} the model, and so needs the data used to create it. The same is true of \code{weights}, \code{offset}, \code{penalty.factor}, \code{lower.limits}, \code{upper.limits} if these were used in the original call. Failure to do so will result in an error.} \item{\dots}{This is the mechanism for passing arguments like \code{x=} when \code{exact=TRUE}; see \code{exact} argument.} \item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix; can be sparse as in \code{Matrix} package. This argument is not used for \code{type=c("coefficients","nonzero")}} \item{type}{Type of prediction required. Type \code{"link"} gives the linear predictors for \code{"binomial"}, \code{"multinomial"}, \code{"poisson"} or \code{"cox"} models; for \code{"gaussian"} models it gives the fitted values. Type \code{"response"} gives the fitted probabilities for \code{"binomial"} or \code{"multinomial"}, fitted mean for \code{"poisson"} and the fitted relative-risk for \code{"cox"}; for \code{"gaussian"} type \code{"response"} is equivalent to type \code{"link"}. Type \code{"coefficients"} computes the coefficients at the requested values for \code{s}. Note that for \code{"binomial"} models, results are returned only for the class corresponding to the second level of the factor response. 
Type \code{"class"} applies only to \code{"binomial"} or \code{"multinomial"} models, and produces the class label corresponding to the maximum probability. Type \code{"nonzero"} returns a list of the indices of the nonzero coefficients for each value of \code{s}.} \item{newoffset}{If an offset is used in the fit, then one must be supplied for making predictions (except for \code{type="coefficients"} or \code{type="nonzero"})} \item{gamma}{Single value of \code{gamma} at which predictions are required, for "relaxed" objects.} } \value{ The object returned depends on type. } \description{ Similar to other predict methods, this functions predicts fitted values, logits, coefficients and more from a fitted \code{"glmnet"} object. } \details{ The shape of the objects returned are different for \code{"multinomial"} objects. This function actually calls \code{NextMethod()}, and the appropriate predict method is invoked for each of the three model types. \code{coef(...)} is equivalent to \code{predict(type="coefficients",...)} } \examples{ x=matrix(rnorm(100*20),100,20) y=rnorm(100) g2=sample(1:2,100,replace=TRUE) g4=sample(1:4,100,replace=TRUE) fit1=glmnet(x,y) predict(fit1,newx=x[1:5,],s=c(0.01,0.005)) predict(fit1,type="coef") fit2=glmnet(x,g2,family="binomial") predict(fit2,type="response",newx=x[2:5,]) predict(fit2,type="nonzero") fit3=glmnet(x,g4,family="multinomial") predict(fit3,newx=x[1:3,],type="response",s=0.01) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent (2010), Journal of Statistical Software, Vol. 33(1), 1-22}, \doi{10.18637/jss.v033.i01}.\cr Simon, N., Friedman, J., Hastie, T. and Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5), 1-13}, \doi{10.18637/jss.v039.i05}.\cr Glmnet webpage with four vignettes, \url{https://glmnet.stanford.edu}. 
} \seealso{ \code{glmnet}, and \code{print}, and \code{coef} methods, and \code{cv.glmnet}. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/elnet.fit.Rd0000644000176200001440000001271114211522451014471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{elnet.fit} \alias{elnet.fit} \title{Solve weighted least squares (WLS) problem for a single lambda value} \usage{ elnet.fit( x, y, weights, lambda, alpha = 1, intercept = TRUE, thresh = 1e-07, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = c(), lower.limits = -Inf, upper.limits = Inf, warm = NULL, from.glmnet.fit = FALSE, save.fit = FALSE ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed that any standardization needed has already been done.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights. \code{elnet.fit} does NOT standardize these weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{intercept}{Should intercept be fitted (default=TRUE) or set to zero (FALSE)?} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. 
Default value is \code{1e-7}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}. (If a warm start object is provided, the number of passes the warm start object performed is included.)} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to \code{nvars}.} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}.} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}.} \item{warm}{Either a \code{glmnetfit} object or a list (with names \code{beta} and \code{a0} containing coefficients and intercept respectively) which can be used as a warm start. Default is \code{NULL}, indicating no warm start. For internal use only.} \item{from.glmnet.fit}{Was \code{elnet.fit()} called from \code{glmnet.fit()}? Default is FALSE. This has implications for computation of the penalty factors.} \item{save.fit}{Return the warm start object? Default is FALSE.} } \value{ An object with class "glmnetfit" and "glmnet". The list returned has the same keys as that of a \code{glmnet} object, except that it might have an additional \code{warm_fit} key. 
\item{a0}{Intercept value.} \item{beta}{A \code{nvars x 1} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{Lambda value used.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the intercept model.} \item{npasses}{Total passes over the data.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{Always FALSE, since offsets do not appear in the WLS problem. Included for compatibility with glmnet output.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} \item{warm_fit}{If \code{save.fit=TRUE}, output of C++ routine, used for warm starts. For internal use only.} } \description{ Solves the weighted least squares (WLS) problem for a single lambda value. Internal function that users should not call directly. } \details{ WARNING: Users should not call \code{elnet.fit} directly. Higher-level functions in this package call \code{elnet.fit} as a subroutine. If a warm start object is provided, some of the other arguments in the function may be overridden. \code{elnet.fit} is essentially a wrapper around a C++ subroutine which minimizes \deqn{1/2 \sum w_i (y_i - X_i^T \beta)^2 + \sum \lambda \gamma_j [(1-\alpha)/2 \beta^2+\alpha|\beta|],} over \eqn{\beta}, where \eqn{\gamma_j} is the relative penalty factor on the jth variable. 
If \code{intercept = TRUE}, then the term in the first sum is \eqn{w_i (y_i - \beta_0 - X_i^T \beta)^2}, and we are minimizing over both \eqn{\beta_0} and \eqn{\beta}. None of the inputs are standardized except for \code{penalty.factor}, which is standardized so that they sum up to \code{nvars}. } glmnet/man/fid.Rd0000644000176200001440000000160013775432176013360 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxgrad.R \name{fid} \alias{fid} \title{Helper function for Cox deviance and gradient} \usage{ fid(x, index) } \arguments{ \item{x}{Sorted vector of death times.} \item{index}{Vector of indices for the death times.} } \value{ A list with two arguments. \item{index_first}{A vector of indices for the first observation at each death time as they appear in the sorted list.} \item{index_ties}{If there are no ties at all, this is NULL. If not, this is a list with length equal to the number of unique times with ties. For each time with ties, index_ties gives the indices of the observations with a death at that time.} } \description{ Helps to find ties in death times of data. } \examples{ # Example with no ties glmnet:::fid(c(1, 4, 5, 6), 1:5) # Example with ties glmnet:::fid(c(1, 1, 1, 2, 3, 3, 4, 4, 4), 1:9) } glmnet/man/stratifySurv.Rd0000644000176200001440000000230114013330131015310 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stratifySurv.R \name{stratifySurv} \alias{stratifySurv} \title{Add strata to a Surv object} \usage{ stratifySurv(y, strata = rep(1, length(y))) } \arguments{ \item{y}{A Surv object.} \item{strata}{A vector of length equal to the number of observations in y, indicating strata membership. Default is all belong to same strata.} } \value{ An object of class \code{stratifySurv} (in addition to all the classes \code{y} belonged to). } \description{ Helper function to add strata as an attribute to a Surv object. 
The output of this function can be used as the response in \code{glmnet()} for fitting stratified Cox models. } \details{ When fitting a stratified Cox model with \code{glmnet()}, strata should be added to a \code{Surv} response with this helper function. Note that it is not sufficient to add strata as an attribute to the \code{Surv} response manually: if the result does not have class \code{stratifySurv}, subsetting of the response will not work properly. } \examples{ y <- survival::Surv(1:10, rep(0:1, length.out = 10)) strata <- rep(1:3, length.out = 10) y2 <- stratifySurv(y, strata) # returns stratifySurv object } glmnet/man/predict.cv.glmnet.Rd0000644000176200001440000000632014211522451016126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.cv.glmnet.R, R/predict.cv.relaxed.R \name{predict.cv.glmnet} \alias{predict.cv.glmnet} \alias{coef.cv.glmnet} \alias{coef.cv.relaxed} \alias{predict.cv.relaxed} \title{make predictions from a "cv.glmnet" object.} \usage{ \method{predict}{cv.glmnet}(object, newx, s = c("lambda.1se", "lambda.min"), ...) \method{predict}{cv.relaxed}( object, newx, s = c("lambda.1se", "lambda.min"), gamma = c("gamma.1se", "gamma.min"), ... ) } \arguments{ \item{object}{Fitted \code{"cv.glmnet"} or \code{"cv.relaxed"} object.} \item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix; can be sparse as in \code{Matrix} package. See documentation for \code{predict.glmnet}.} \item{s}{Value(s) of the penalty parameter \code{lambda} at which predictions are required. Default is the value \code{s="lambda.1se"} stored on the CV \code{object}. Alternatively \code{s="lambda.min"} can be used. If \code{s} is numeric, it is taken as the value(s) of \code{lambda} to be used. (For historical reasons we use the symbol 's' rather than 'lambda' to reference this parameter)} \item{\dots}{Not used. 
Other arguments to predict.} \item{gamma}{Value (single) of 'gamma' at which predictions are to be made} } \value{ The object returned depends on the \dots{} argument which is passed on to the \code{predict} method for \code{glmnet} objects. } \description{ This function makes predictions from a cross-validated glmnet model, using the stored \code{"glmnet.fit"} object, and the optimal value chosen for \code{lambda} (and \code{gamma} for a 'relaxed' fit). } \details{ This function makes it easier to use the results of cross-validation to make a prediction. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) cv.fit = cv.glmnet(x, y) predict(cv.fit, newx = x[1:5, ]) coef(cv.fit) coef(cv.fit, s = "lambda.min") predict(cv.fit, newx = x[1:5, ], s = c(0.001, 0.002)) cv.fitr = cv.glmnet(x, y, relax = TRUE) predict(cv.fit, newx = x[1:5, ]) coef(cv.fit) coef(cv.fit, s = "lambda.min", gamma = "gamma.min") predict(cv.fit, newx = x[1:5, ], s = c(0.001, 0.002), gamma = "gamma.min") } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent (2010), Journal of Statistical Software, Vol. 33(1), 1-22}, \doi{10.18637/jss.v033.i01}.\cr Simon, N., Friedman, J., Hastie, T. and Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5), 1-13}, \doi{10.18637/jss.v039.i05}.\cr Hastie, T., Tibshirani, Robert and Tibshirani, Ryan (2020) \emph{Best Subset, Forward Stepwise or Lasso? Analysis and Recommendations Based on Extensive Comparisons, Statist. Sc. Vol. 35(4), 579-592}, \url{https://arxiv.org/abs/1707.08692}.\cr Glmnet webpage with four vignettes, \url{https://glmnet.stanford.edu}. 
} \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/cox.path.Rd0000644000176200001440000001071013775432176014344 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{cox.path} \alias{cox.path} \title{Fit a Cox regression model with elastic net regularization for a path of lambda values} \usage{ cox.path( x, y, weights = NULL, offset = NULL, alpha = 1, nlambda = 100, lambda.min.ratio = ifelse(nobs < nvars, 0.01, 1e-04), lambda = NULL, standardize = TRUE, thresh = 1e-10, exclude = NULL, penalty.factor = rep(1, nvars), lower.limits = -Inf, upper.limits = Inf, maxit = 1e+05, trace.it = 0, ... ) } \arguments{ \item{x}{See glmnet help file} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{weights}{See glmnet help file} \item{offset}{See glmnet help file} \item{alpha}{See glmnet help file} \item{nlambda}{See glmnet help file} \item{lambda.min.ratio}{See glmnet help file} \item{lambda}{See glmnet help file} \item{standardize}{See glmnet help file} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{exclude}{See glmnet help file} \item{penalty.factor}{See glmnet help file} \item{lower.limits}{See glmnet help file} \item{upper.limits}{See glmnet help file} \item{maxit}{See glmnet help file} \item{trace.it}{Controls how much information is printed to screen. Default is \code{trace.it=0} (no information printed). If \code{trace.it=1}, a progress bar is displayed. 
If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted.} \item{...}{Other arguments passed from glmnet (not used right now).} } \value{ An object of class "coxnet" and "glmnet". \item{a0}{Intercept value, \code{NULL} for "cox" family.} \item{beta}{A \code{nvars x length(lambda)} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients for each value of lambda.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{The actual sequence of lambda values used. When alpha=0, the largest lambda reported does not quite give the zero coefficients reported (lambda=inf would in principle). Instead, the largest lambda for alpha=0.001 is used, and the sequence of lambda values is derived from this.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the 0 model.} \item{npasses}{Total passes over the data summed over all lambda values.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} } \description{ Fit a Cox regression model via penalized maximum likelihood for a path of lambda values. Can deal with (start, stop] data and strata, as well as sparse design matrices. } \details{ Sometimes the sequence is truncated before \code{nlambda} values of lambda have been used. This happens when \code{cox.path} detects that the decrease in deviance is marginal (i.e. 
we are near a saturated fit). } \examples{ set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) xvec[sample.int(nobs * nvars, size = 0.4 * nobs * nvars)] <- 0 x <- matrix(xvec, nrow = nobs) beta <- rnorm(nvars / 3) fx <- x[, seq(nvars / 3)] \%*\% beta / 3 ty <- rexp(nobs, exp(fx)) tcens <- rbinom(n = nobs, prob = 0.3, size = 1) jsurv <- survival::Surv(ty, tcens) fit1 <- glmnet:::cox.path(x, jsurv) # works with sparse x matrix x_sparse <- Matrix::Matrix(x, sparse = TRUE) fit2 <- glmnet:::cox.path(x_sparse, jsurv) # example with (start, stop] data set.seed(2) start_time <- runif(100, min = 0, max = 5) stop_time <- start_time + runif(100, min = 0.1, max = 3) status <- rbinom(n = nobs, prob = 0.3, size = 1) jsurv_ss <- survival::Surv(start_time, stop_time, status) fit3 <- glmnet:::cox.path(x, jsurv_ss) # example with strata jsurv_ss2 <- stratifySurv(jsurv_ss, rep(1:2, each = 50)) fit4 <- glmnet:::cox.path(x, jsurv_ss2) } glmnet/man/glmnet.measures.Rd0000644000176200001440000000153314410376311015715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet.measures.R \name{glmnet.measures} \alias{glmnet.measures} \title{Display the names of the measures used in CV for different "glmnet" families} \usage{ glmnet.measures( family = c("all", "gaussian", "binomial", "poisson", "multinomial", "cox", "mgaussian", "GLM") ) } \arguments{ \item{family}{If a "glmnet" family is supplied, a list of the names of measures available for that family are produced. Default is "all", in which case the names of measures for all families are produced.} } \description{ Produces a list of names of measures } \details{ Try it and see. A very simple function to provide information } \seealso{ \code{cv.glmnet} and \code{assess.glmnet}. 
} \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} glmnet/man/glmnet.Rd0000644000176200001440000005057414470256451014113 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet.R, R/relax.glmnet.R \name{glmnet} \alias{glmnet} \alias{relax.glmnet} \title{fit a GLM with lasso or elasticnet regularization} \usage{ glmnet( x, y, family = c("gaussian", "binomial", "poisson", "multinomial", "cox", "mgaussian"), weights = NULL, offset = NULL, alpha = 1, nlambda = 100, lambda.min.ratio = ifelse(nobs < nvars, 0.01, 1e-04), lambda = NULL, standardize = TRUE, intercept = TRUE, thresh = 1e-07, dfmax = nvars + 1, pmax = min(dfmax * 2 + 20, nvars), exclude = NULL, penalty.factor = rep(1, nvars), lower.limits = -Inf, upper.limits = Inf, maxit = 1e+05, type.gaussian = ifelse(nvars < 500, "covariance", "naive"), type.logistic = c("Newton", "modified.Newton"), standardize.response = FALSE, type.multinomial = c("ungrouped", "grouped"), relax = FALSE, trace.it = 0, ... ) relax.glmnet(fit, x, ..., maxp = n - 3, path = FALSE, check.args = TRUE) } \arguments{ \item{x}{input matrix, of dimension nobs x nvars; each row is an observation vector. Can be in sparse matrix format (inherit from class \code{"sparseMatrix"} as in package \code{Matrix}). Requirement: \code{nvars >1}; in other words, \code{x} should have 2 or more columns.} \item{y}{response variable. Quantitative for \code{family="gaussian"}, or \code{family="poisson"} (non-negative counts). For \code{family="binomial"} should be either a factor with two levels, or a two-column matrix of counts or proportions (the second column is treated as the target class; for a factor, the last level in alphabetical order is the target class). For \code{family="multinomial"}, can be a \code{nc>=2} level factor, or a matrix with \code{nc} columns of counts or proportions. 
For either \code{"binomial"} or \code{"multinomial"}, if \code{y} is presented as a vector, it will be coerced into a factor. For \code{family="cox"}, preferably a \code{Surv} object from the survival package: see Details section for more information. For \code{family="mgaussian"}, \code{y} is a matrix of quantitative responses.} \item{family}{Either a character string representing one of the built-in families, or else a \code{glm()} family object. For more information, see Details section below or the documentation for response type (above).} \item{weights}{observation weights. Can be total counts if responses are proportion matrices. Default is 1 for each observation} \item{offset}{A vector of length \code{nobs} that is included in the linear predictor (a \code{nobs x nc} matrix for the \code{"multinomial"} family). Useful for the \code{"poisson"} family (e.g. log of exposure time), or for refining a model by starting at a current fit. Default is \code{NULL}. If supplied, then values must also be supplied to the \code{predict} function.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0\le\alpha\le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{nlambda}{The number of \code{lambda} values - default is 100.} \item{lambda.min.ratio}{Smallest value for \code{lambda}, as a fraction of \code{lambda.max}, the (data derived) entry value (i.e. the smallest value for which all coefficients are zero). The default depends on the sample size \code{nobs} relative to the number of variables \code{nvars}. If \code{nobs > nvars}, the default is \code{0.0001}, close to zero. If \code{nobs < nvars}, the default is \code{0.01}. A very small value of \code{lambda.min.ratio} will lead to a saturated fit in the \code{nobs < nvars} case. 
This is undefined for \code{"binomial"} and \code{"multinomial"} models, and \code{glmnet} will exit gracefully when the percentage deviance explained is almost 1.} \item{lambda}{A user supplied \code{lambda} sequence. Typical usage is to have the program compute its own \code{lambda} sequence based on \code{nlambda} and \code{lambda.min.ratio}. Supplying a value of \code{lambda} overrides this. WARNING: use with care. Avoid supplying a single value for \code{lambda} (for predictions after CV use \code{predict()} instead). Supply instead a decreasing sequence of \code{lambda} values. \code{glmnet} relies on its warms starts for speed, and its often faster to fit a whole path than compute a single fit.} \item{standardize}{Logical flag for x variable standardization, prior to fitting the model sequence. The coefficients are always returned on the original scale. Default is \code{standardize=TRUE}. If variables are in the same units already, you might not wish to standardize. See details below for y standardization with \code{family="gaussian"}.} \item{intercept}{Should intercept(s) be fitted (default=TRUE) or set to zero (FALSE)} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than \code{thresh} times the null deviance. Defaults value is \code{1E-7}.} \item{dfmax}{Limit the maximum number of variables in the model. Useful for very large \code{nvars}, if a partial path is desired.} \item{pmax}{Limit the maximum number of variables ever to be nonzero} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor for the variables excluded (next item). Users can supply instead an \code{exclude} function that generates the list of indices. 
This function is most generally defined as \code{function(x, y, weights, ...)}, and is called inside \code{glmnet} to generate the indices for excluded variables. The \code{...} argument is required, the others are optional. This is useful for filtering wide data, and works correctly with \code{cv.glmnet}. See the vignette 'Introduction' for examples.} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in \code{exclude}). Also, any \code{penalty.factor} that is set to \code{inf} is converted to an \code{exclude}, and then internally reset to 1. Note: the penalty factors are internally rescaled to sum to nvars, and the lambda sequence will reflect this change.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}} \item{maxit}{Maximum number of passes over the data for all lambda values; default is 10^5.} \item{type.gaussian}{Two algorithm types are supported for (only) \code{family="gaussian"}. The default when \code{nvar<500} is \code{type.gaussian="covariance"}, and saves all inner-products ever computed. This can be much faster than \code{type.gaussian="naive"}, which loops through \code{nobs} every time an inner-product is computed. 
The latter can be far more efficient for \code{nvar >> nobs} situations, or when \code{nvar > 500}.} \item{type.logistic}{If \code{"Newton"} then the exact hessian is used (default), while \code{"modified.Newton"} uses an upper-bound on the hessian, and can be faster.} \item{standardize.response}{This is for the \code{family="mgaussian"} family, and allows the user to standardize the response variables} \item{type.multinomial}{If \code{"grouped"} then a grouped lasso penalty is used on the multinomial coefficients for a variable. This ensures they are all in our out together. The default is \code{"ungrouped"}} \item{relax}{If \code{TRUE} then for each \emph{active set} in the path of solutions, the model is refit without any regularization. See \code{details} for more information. This argument is new, and users may experience convergence issues with small datasets, especially with non-gaussian families. Limiting the value of 'maxp' can alleviate these issues in some cases.} \item{trace.it}{If \code{trace.it=1}, then a progress bar is displayed; useful for big models that take a long time to fit.} \item{...}{Additional argument used in \code{relax.glmnet}. These include some of the original arguments to 'glmnet', and each must be named if used.} \item{fit}{For \code{relax.glmnet} a fitted 'glmnet' object} \item{maxp}{a limit on how many relaxed coefficients are allowed. Default is 'n-3', where 'n' is the sample size. This may not be sufficient for non-gaussian familes, in which case users should supply a smaller value. This argument can be supplied directly to 'glmnet'.} \item{path}{Since \code{glmnet} does not do stepsize optimization, the Newton algorithm can get stuck and not converge, especially with relaxed fits. With \code{path=TRUE}, each relaxed fit on a particular set of variables is computed pathwise using the original sequence of lambda values (with a zero attached to the end). 
Not needed for Gaussian models, and should not be used unless needed, since it will lead to longer compute times. Default is \code{path=FALSE}.
This is defined to be 2*(loglike_sat -loglike(Null)); The NULL model refers to the intercept model, except for the Cox, where it is the 0 model.} \item{df}{The number of nonzero coefficients for each value of \code{lambda}. For \code{"multnet"}, this is the number of variables with a nonzero coefficient for \emph{any} class.} \item{dfmat}{For \code{"multnet"} and \code{"mrelnet"} only. A matrix consisting of the number of nonzero coefficients per class} \item{dim}{dimension of coefficient matrix (ices)} \item{nobs}{number of observations} \item{npasses}{total passes over the data summed over all lambda values} \item{offset}{a logical variable indicating whether an offset was included in the model} \item{jerr}{error flag, for warnings and errors (largely for internal debugging).} \item{relaxed}{If \code{relax=TRUE}, this additional item is another glmnet object with different values for \code{beta} and \code{dev.ratio}} } \description{ Fit a generalized linear model via penalized maximum likelihood. The regularization path is computed for the lasso or elasticnet penalty at a grid of values for the regularization parameter lambda. Can deal with all shapes of data, including very large sparse data matrices. Fits linear, logistic and multinomial, poisson, and Cox regression models. } \details{ The sequence of models implied by \code{lambda} is fit by coordinate descent. For \code{family="gaussian"} this is the lasso sequence if \code{alpha=1}, else it is the elasticnet sequence. The objective function for \code{"gaussian"} is \deqn{1/2 RSS/nobs + \lambda*penalty,} and for the other models it is \deqn{-loglik/nobs + \lambda*penalty.} Note also that for \code{"gaussian"}, \code{glmnet} standardizes y to have unit variance (using 1/n rather than 1/(n-1) formula) before computing its lambda sequence (and then unstandardizes the resulting coefficients); if you wish to reproduce/compare results with other software, best to supply a standardized y. 
The coefficients for any predictor variables with zero variance are set to zero for all values of lambda. \subsection{Details on \code{family} option}{ From version 4.0 onwards, glmnet supports both the original built-in families, as well as \emph{any} family object as used by \code{stats:glm()}. This opens the door to a wide variety of additional models. For example \code{family=binomial(link=cloglog)} or \code{family=negative.binomial(theta=1.5)} (from the MASS library). Note that the code runs faster for the built-in families. The built in families are specifed via a character string. For all families, the object produced is a lasso or elasticnet regularization path for fitting the generalized linear regression paths, by maximizing the appropriate penalized log-likelihood (partial likelihood for the "cox" model). Sometimes the sequence is truncated before \code{nlambda} values of \code{lambda} have been used, because of instabilities in the inverse link functions near a saturated fit. \code{glmnet(...,family="binomial")} fits a traditional logistic regression model for the log-odds. \code{glmnet(...,family="multinomial")} fits a symmetric multinomial model, where each class is represented by a linear model (on the log-scale). The penalties take care of redundancies. A two-class \code{"multinomial"} model will produce the same fit as the corresponding \code{"binomial"} model, except the pair of coefficient matrices will be equal in magnitude and opposite in sign, and half the \code{"binomial"} values. Two useful additional families are the \code{family="mgaussian"} family and the \code{type.multinomial="grouped"} option for multinomial fitting. The former allows a multi-response gaussian model to be fit, using a "group -lasso" penalty on the coefficients for each variable. Tying the responses together like this is called "multi-task" learning in some domains. 
The grouped multinomial allows the same penalty for the \code{family="multinomial"} model, which is also multi-responsed. For both of these the penalty on the coefficient vector for variable j is \deqn{(1-\alpha)/2||\beta_j||_2^2+\alpha||\beta_j||_2.} When \code{alpha=1} this is a group-lasso penalty, and otherwise it mixes with quadratic just like elasticnet. A small detail in the Cox model: if death times are tied with censored times, we assume the censored times occurred just \emph{before} the death times in computing the Breslow approximation; if users prefer the usual convention of \emph{after}, they can add a small number to all censoring times to achieve this effect. } \subsection{Details on response for \code{family="cox"}}{ For Cox models, the response should preferably be a \code{Surv} object, created by the \code{Surv()} function in \pkg{survival} package. For right-censored data, this object should have type "right", and for (start, stop] data, it should have type "counting". To fit stratified Cox models, strata should be added to the response via the \code{stratifySurv()} function before passing the response to \code{glmnet()}. (For backward compatibility, right-censored data can also be passed as a two-column matrix with columns named 'time' and 'status'. The latter is a binary variable, with '1' indicating death, and '0' indicating right censored.) } \subsection{Details on \code{relax} option}{ If \code{relax=TRUE} a duplicate sequence of models is produced, where each active set in the elastic-net path is refit without regularization. The result of this is a matching \code{"glmnet"} object which is stored on the original object in a component named \code{"relaxed"}, and is part of the glmnet output. Generally users will not call \code{relax.glmnet} directly, unless the original 'glmnet' object took a long time to fit. But if they do, they must supply the fit, and all the original arguments used to create that fit. 
They can limit the length of the relaxed path via 'maxp'. } } \examples{ # Gaussian x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = glmnet(x, y) print(fit1) coef(fit1, s = 0.01) # extract coefficients at a single value of lambda predict(fit1, newx = x[1:10, ], s = c(0.01, 0.005)) # make predictions # Relaxed fit1r = glmnet(x, y, relax = TRUE) # can be used with any model # multivariate gaussian y = matrix(rnorm(100 * 3), 100, 3) fit1m = glmnet(x, y, family = "mgaussian") plot(fit1m, type.coef = "2norm") # binomial g2 = sample(c(0,1), 100, replace = TRUE) fit2 = glmnet(x, g2, family = "binomial") fit2n = glmnet(x, g2, family = binomial(link=cloglog)) fit2r = glmnet(x,g2, family = "binomial", relax=TRUE) fit2rp = glmnet(x,g2, family = "binomial", relax=TRUE, path=TRUE) # multinomial g4 = sample(1:4, 100, replace = TRUE) fit3 = glmnet(x, g4, family = "multinomial") fit3a = glmnet(x, g4, family = "multinomial", type.multinomial = "grouped") # poisson N = 500 p = 20 nzc = 5 x = matrix(rnorm(N * p), N, p) beta = rnorm(nzc) f = x[, seq(nzc)] \%*\% beta mu = exp(f) y = rpois(N, mu) fit = glmnet(x, y, family = "poisson") plot(fit) pfit = predict(fit, x, s = 0.001, type = "response") plot(pfit, y) # Cox set.seed(10101) N = 1000 p = 30 nzc = p/3 x = matrix(rnorm(N * p), N, p) beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta/3 hx = exp(fx) ty = rexp(N, hx) tcens = rbinom(n = N, prob = 0.3, size = 1) # censoring indicator y = cbind(time = ty, status = 1 - tcens) # y=Surv(ty,1-tcens) with library(survival) fit = glmnet(x, y, family = "cox") plot(fit) # Cox example with (start, stop] data set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) xvec[sample.int(nobs * nvars, size = 0.4 * nobs * nvars)] <- 0 x <- matrix(xvec, nrow = nobs) start_time <- runif(100, min = 0, max = 5) stop_time <- start_time + runif(100, min = 0.1, max = 3) status <- rbinom(n = nobs, prob = 0.3, size = 1) jsurv_ss <- survival::Surv(start_time, stop_time, status) fit <- glmnet(x, 
jsurv_ss, family = "cox") # Cox example with strata jsurv_ss2 <- stratifySurv(jsurv_ss, rep(1:2, each = 50)) fit <- glmnet(x, jsurv_ss2, family = "cox") # Sparse n = 10000 p = 200 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) iz = sample(1:(n * p), size = n * p * 0.85, replace = FALSE) x[iz] = 0 sx = Matrix(x, sparse = TRUE) inherits(sx, "sparseMatrix") #confirm that it is sparse beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta eps = rnorm(n) y = fx + eps px = exp(fx) px = px/(1 + px) ly = rbinom(n = length(px), prob = px, size = 1) system.time(fit1 <- glmnet(sx, y)) system.time(fit2n <- glmnet(x, y)) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent (2010), Journal of Statistical Software, Vol. 33(1), 1-22}, \doi{10.18637/jss.v033.i01}.\cr Simon, N., Friedman, J., Hastie, T. and Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5), 1-13}, \doi{10.18637/jss.v039.i05}.\cr Tibshirani,Robert, Bien, J., Friedman, J., Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2012) \emph{Strong Rules for Discarding Predictors in Lasso-type Problems, JRSSB, Vol. 74(2), 245-266}, \url{https://arxiv.org/abs/1011.2234}.\cr Hastie, T., Tibshirani, Robert and Tibshirani, Ryan (2020) \emph{Best Subset, Forward Stepwise or Lasso? Analysis and Recommendations Based on Extensive Comparisons, Statist. Sc. Vol. 35(4), 579-592}, \url{https://arxiv.org/abs/1707.08692}.\cr Glmnet webpage with four vignettes: \url{https://glmnet.stanford.edu}. } \seealso{ \code{print}, \code{predict}, \code{coef} and \code{plot} methods, and the \code{cv.glmnet} function. 
} \author{ Jerome Friedman, Trevor Hastie, Balasubramanian Narasimhan, Noah Simon, Kenneth Tay and Rob Tibshirani\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/PoissonExample.Rd0000644000176200001440000000075714046314073015563 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{PoissonExample} \alias{PoissonExample} \title{Synthetic dataset with count response} \format{ List containing the following elements: \describe{ \item{x}{500 by 20 matrix of numeric values.} \item{y}{Numeric vector of length 500 consisting of non-negative integers.} } } \usage{ data(PoissonExample) } \description{ Randomly generated data for Poisson regression example. } \keyword{data} glmnet/man/cv.glmnet.Rd0000644000176200001440000002637314405701475014521 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cv.glmnet.R \name{cv.glmnet} \alias{cv.glmnet} \title{Cross-validation for glmnet} \usage{ cv.glmnet( x, y, weights = NULL, offset = NULL, lambda = NULL, type.measure = c("default", "mse", "deviance", "class", "auc", "mae", "C"), nfolds = 10, foldid = NULL, alignment = c("lambda", "fraction"), grouped = TRUE, keep = FALSE, parallel = FALSE, gamma = c(0, 0.25, 0.5, 0.75, 1), relax = FALSE, trace.it = 0, ... ) } \arguments{ \item{x}{\code{x} matrix as in \code{glmnet}.} \item{y}{response \code{y} as in \code{glmnet}.} \item{weights}{Observation weights; defaults to 1 per observation} \item{offset}{Offset vector (matrix) as in \code{glmnet}} \item{lambda}{Optional user-supplied lambda sequence; default is \code{NULL}, and \code{glmnet} chooses its own sequence. Note that this is done for the full model (master sequence), and separately for each fold. The fits are then alligned using the master sequence (see the \code{allignment} argument for additional details). 
Adapting \code{lambda} for each fold leads to better convergence. When \code{lambda} is supplied, the same sequence is used everywhere, but in some GLMs can lead to convergence issues.} \item{type.measure}{loss to use for cross-validation. Currently five options, not all available for all models. The default is \code{type.measure="deviance"}, which uses squared-error for gaussian models (a.k.a \code{type.measure="mse"} there), deviance for logistic and poisson regression, and partial-likelihood for the Cox model. \code{type.measure="class"} applies to binomial and multinomial logistic regression only, and gives misclassification error. \code{type.measure="auc"} is for two-class logistic regression only, and gives area under the ROC curve. \code{type.measure="mse"} or \code{type.measure="mae"} (mean absolute error) can be used by all models except the \code{"cox"}; they measure the deviation from the fitted mean to the response. \code{type.measure="C"} is Harrel's concordance measure, only available for \code{cox} models.} \item{nfolds}{number of folds - default is 10. Although \code{nfolds} can be as large as the sample size (leave-one-out CV), it is not recommended for large datasets. Smallest value allowable is \code{nfolds=3}} \item{foldid}{an optional vector of values between 1 and \code{nfolds} identifying what fold each observation is in. If supplied, \code{nfolds} can be missing.} \item{alignment}{This is an experimental argument, designed to fix the problems users were having with CV, with possible values \code{"lambda"} (the default) else \code{"fraction"}. With \code{"lambda"} the \code{lambda} values from the master fit (on all the data) are used to line up the predictions from each of the folds. In some cases this can give strange values, since the effective \code{lambda} values in each fold could be quite different. With \code{"fraction"} we line up the predictions in each fold according to the fraction of progress along the regularization. 
If in the call a \code{lambda} argument is also provided, \code{alignment="fraction"} is ignored (with a warning).} \item{grouped}{This is an experimental argument, with default \code{TRUE}, and can be ignored by most users. For all models except the \code{"cox"}, this refers to computing \code{nfolds} separate statistics, and then using their mean and estimated standard error to describe the CV curve. If \code{grouped=FALSE}, an error matrix is built up at the observation level from the predictions from the \code{nfolds} fits, and then summarized (does not apply to \code{type.measure="auc"}). For the \code{"cox"} family, \code{grouped=TRUE} obtains the CV partial likelihood for the Kth fold by \emph{subtraction}; by subtracting the log partial likelihood evaluated on the full dataset from that evaluated on the on the (K-1)/K dataset. This makes more efficient use of risk sets. With \code{grouped=FALSE} the log partial likelihood is computed only on the Kth fold} \item{keep}{If \code{keep=TRUE}, a \emph{prevalidated} array is returned containing fitted values for each observation and each value of \code{lambda}. This means these fits are computed with this observation and the rest of its fold omitted. The \code{foldid} vector is also returned. Default is keep=FALSE. If \code{relax=TRUE}, then a list of such arrays is returned, one for each value of 'gamma'. Note: if the value 'gamma=1' is omitted, this case is included in the list since it corresponds to the original 'glmnet' fit.} \item{parallel}{If \code{TRUE}, use parallel \code{foreach} to fit each fold. Must register parallel before hand, such as \code{doMC} or others. See the example below.} \item{gamma}{The values of the parameter for mixing the relaxed fit with the regularized fit, between 0 and 1; default is \code{gamma = c(0, 0.25, 0.5, 0.75, 1)}} \item{relax}{If \code{TRUE}, then CV is done with respect to the mixing parameter \code{gamma} as well as \code{lambda}. 
Default is \code{relax=FALSE}} \item{trace.it}{If \code{trace.it=1}, then progress bars are displayed; useful for big models that take a long time to fit. Limited tracing if \code{parallel=TRUE}} \item{\dots}{Other arguments that can be passed to \code{glmnet}} } \value{ an object of class \code{"cv.glmnet"} is returned, which is a list with the ingredients of the cross-validation fit. If the object was created with \code{relax=TRUE} then this class has a prefix class of \code{"cv.relaxed"}. \item{lambda}{the values of \code{lambda} used in the fits.} \item{cvm}{The mean cross-validated error - a vector of length \code{length(lambda)}.} \item{cvsd}{estimate of standard error of \code{cvm}.} \item{cvup}{upper curve = \code{cvm+cvsd}.} \item{cvlo}{lower curve = \code{cvm-cvsd}.} \item{nzero}{number of non-zero coefficients at each \code{lambda}.} \item{name}{a text string indicating type of measure (for plotting purposes).} \item{glmnet.fit}{a fitted glmnet object for the full data.} \item{lambda.min}{value of \code{lambda} that gives minimum \code{cvm}.} \item{lambda.1se}{largest value of \code{lambda} such that error is within 1 standard error of the minimum.} \item{fit.preval}{if \code{keep=TRUE}, this is the array of prevalidated fits. Some entries can be \code{NA}, if that and subsequent values of \code{lambda} are not reached for that fold} \item{foldid}{if \code{keep=TRUE}, the fold assignments used} \item{index}{a one column matrix with the indices of \code{lambda.min} and \code{lambda.1se} in the sequence of coefficients, fits etc.} \item{relaxed}{if \code{relax=TRUE}, this additional item has the CV info for each of the mixed fits. In particular it also selects \code{lambda, gamma} pairs corresponding to the 1se rule, as well as the minimum error. 
It also has a component \code{index}, a two-column matrix which contains the \code{lambda} and \code{gamma} indices corresponding to the "min" and "1se" solutions.} } \description{ Does k-fold cross-validation for glmnet, produces a plot, and returns a value for \code{lambda} (and \code{gamma} if \code{relax=TRUE}) } \details{ The function runs \code{glmnet} \code{nfolds}+1 times; the first to get the \code{lambda} sequence, and then the remainder to compute the fit with each of the folds omitted. The error is accumulated, and the average error and standard deviation over the folds is computed. Note that \code{cv.glmnet} does NOT search for values for \code{alpha}. A specific value should be supplied, else \code{alpha=1} is assumed by default. If users would like to cross-validate \code{alpha} as well, they should call \code{cv.glmnet} with a pre-computed vector \code{foldid}, and then use this same fold vector in separate calls to \code{cv.glmnet} with different values of \code{alpha}. Note also that the results of \code{cv.glmnet} are random, since the folds are selected at random. Users can reduce this randomness by running \code{cv.glmnet} many times, and averaging the error curves. If \code{relax=TRUE} then the values of \code{gamma} are used to mix the fits. If \eqn{\eta} is the fit for lasso/elastic net, and \eqn{\eta_R} is the relaxed fit (with unpenalized coefficients), then a relaxed fit mixed by \eqn{\gamma} is \deqn{\eta(\gamma)=(1-\gamma)\eta_R+\gamma\eta.} There is practically no extra cost for having a lot of values for \code{gamma}. However, 5 seems sufficient for most purposes. CV then selects both \code{gamma} and \code{lambda}. 
} \examples{ set.seed(1010) n = 1000 p = 100 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta eps = rnorm(n) * 5 y = drop(fx + eps) px = exp(fx) px = px/(1 + px) ly = rbinom(n = length(px), prob = px, size = 1) set.seed(1011) cvob1 = cv.glmnet(x, y) plot(cvob1) coef(cvob1) predict(cvob1, newx = x[1:5, ], s = "lambda.min") title("Gaussian Family", line = 2.5) set.seed(1011) cvob1a = cv.glmnet(x, y, type.measure = "mae") plot(cvob1a) title("Gaussian Family", line = 2.5) set.seed(1011) par(mfrow = c(2, 2), mar = c(4.5, 4.5, 4, 1)) cvob2 = cv.glmnet(x, ly, family = "binomial") plot(cvob2) title("Binomial Family", line = 2.5) frame() set.seed(1011) cvob3 = cv.glmnet(x, ly, family = "binomial", type.measure = "class") plot(cvob3) title("Binomial Family", line = 2.5) \dontrun{ cvob1r = cv.glmnet(x, y, relax = TRUE) plot(cvob1r) predict(cvob1r, newx = x[, 1:5]) set.seed(1011) cvob3a = cv.glmnet(x, ly, family = "binomial", type.measure = "auc") plot(cvob3a) title("Binomial Family", line = 2.5) set.seed(1011) mu = exp(fx/10) y = rpois(n, mu) cvob4 = cv.glmnet(x, y, family = "poisson") plot(cvob4) title("Poisson Family", line = 2.5) # Multinomial n = 500 p = 30 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) beta3 = matrix(rnorm(30), 10, 3) beta3 = rbind(beta3, matrix(0, p - 10, 3)) f3 = x \%*\% beta3 p3 = exp(f3) p3 = p3/apply(p3, 1, sum) g3 = glmnet:::rmult(p3) set.seed(10101) cvfit = cv.glmnet(x, g3, family = "multinomial") plot(cvfit) title("Multinomial Family", line = 2.5) # Cox beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta/3 hx = exp(fx) ty = rexp(n, hx) tcens = rbinom(n = n, prob = 0.3, size = 1) # censoring indicator y = cbind(time = ty, status = 1 - tcens) # y=Surv(ty,1-tcens) with library(survival) foldid = sample(rep(seq(10), length = n)) fit1_cv = cv.glmnet(x, y, family = "cox", foldid = foldid) plot(fit1_cv) title("Cox Family", line = 2.5) # Parallel require(doMC) registerDoMC(cores = 4) x = matrix(rnorm(1e+05 * 
100), 1e+05, 100) y = rnorm(1e+05) system.time(cv.glmnet(x, y)) system.time(cv.glmnet(x, y, parallel = TRUE)) } } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent (2010), Journal of Statistical Software, Vol. 33(1), 1-22}, \doi{10.18637/jss.v033.i01}.\cr Simon, N., Friedman, J., Hastie, T. and Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5), 1-13}, \doi{10.18637/jss.v039.i05}. } \seealso{ \code{glmnet} and \code{plot}, \code{predict}, and \code{coef} methods for \code{"cv.glmnet"} and \code{"cv.relaxed"} objects. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Noah Simon helped develop the 'coxnet' function.\cr Jeffrey Wong and B. Narasimhan helped with the parallel option\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/plot.cv.glmnet.Rd0000644000176200001440000000402013556622666015471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.cv.glmnet.R, R/plot.cv.relaxed.R \name{plot.cv.glmnet} \alias{plot.cv.glmnet} \alias{plot.cv.relaxed} \title{plot the cross-validation curve produced by cv.glmnet} \usage{ \method{plot}{cv.glmnet}(x, sign.lambda = 1, ...) \method{plot}{cv.relaxed}(x, se.bands = TRUE, ...) } \arguments{ \item{x}{fitted \code{"cv.glmnet"} object} \item{sign.lambda}{Either plot against \code{log(lambda)} (default) or its negative if \code{sign.lambda=-1}.} \item{\dots}{Other graphical parameters to plot} \item{se.bands}{Should shading be produced to show standard-error bands; default is \code{TRUE}} } \description{ Plots the cross-validation curve, and upper and lower standard deviation curves, as a function of the \code{lambda} values used. 
If the object has class \code{"cv.relaxed"} a different plot is produced, showing both \code{lambda} and \code{gamma} } \details{ A plot is produced, and nothing is returned. } \examples{ set.seed(1010) n = 1000 p = 100 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) beta = rnorm(nzc) fx = (x[, seq(nzc)] \%*\% beta) eps = rnorm(n) * 5 y = drop(fx + eps) px = exp(fx) px = px/(1 + px) ly = rbinom(n = length(px), prob = px, size = 1) cvob1 = cv.glmnet(x, y) plot(cvob1) title("Gaussian Family", line = 2.5) cvob1r = cv.glmnet(x, y, relax = TRUE) plot(cvob1r) frame() set.seed(1011) par(mfrow = c(2, 2), mar = c(4.5, 4.5, 4, 1)) cvob2 = cv.glmnet(x, ly, family = "binomial") plot(cvob2) title("Binomial Family", line = 2.5) ## set.seed(1011) ## cvob3 = cv.glmnet(x, ly, family = "binomial", type = "class") ## plot(cvob3) ## title("Binomial Family", line = 2.5) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent} } \seealso{ \code{glmnet} and \code{cv.glmnet}. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/deviance.glmnet.Rd0000644000176200001440000000305513553366407015664 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deviance.glmnet.R \name{deviance.glmnet} \alias{deviance.glmnet} \title{Extract the deviance from a glmnet object} \usage{ \method{deviance}{glmnet}(object, ...) } \arguments{ \item{object}{fitted glmnet object} \item{\dots}{additional print arguments} } \value{ (1-dev.ratio)*nulldev } \description{ Compute the deviance sequence from the glmnet object } \details{ A glmnet object has components \code{dev.ratio} and \code{nulldev}. The former is the fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. 
The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Null deviance is defined to be 2*(loglike_sat -loglike(Null)); The NULL model refers to the intercept model, except for the Cox, where it is the 0 model. Hence dev.ratio=1-deviance/nulldev, and this \code{deviance} method returns (1-dev.ratio)*nulldev. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = glmnet(x, y) deviance(fit1) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent} } \seealso{ \code{glmnet}, \code{predict}, \code{print}, and \code{coef} methods. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/rmult.Rd0000644000176200001440000000117613554367224013765 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rmult.R \name{rmult} \alias{rmult} \title{Generate multinomial samples from a probability matrix} \usage{ rmult(p) } \arguments{ \item{p}{matrix of probabilities, with number of columns the number of classes} } \value{ a vector of class memberships } \description{ Generate multinomial samples } \details{ Simple function that calls the \code{rmultinom} function. It generates a class label for each row of its input matrix of class probabilities. 
} \author{ Trevor Hastie \cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } glmnet/man/print.cv.glmnet.Rd0000644000176200001440000000315513775432176015655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.cv.glmnet.R \name{print.cv.glmnet} \alias{print.cv.glmnet} \alias{print.cv.relaxed} \title{print a cross-validated glmnet object} \usage{ \method{print}{cv.glmnet}(x, digits = max(3, getOption("digits") - 3), ...) } \arguments{ \item{x}{fitted 'cv.glmnet' object} \item{digits}{significant digits in printout} \item{\dots}{additional print arguments} } \description{ Print a summary of the results of cross-validation for a glmnet model. } \details{ A summary of the cross-validated fit is produced, slightly different for a 'cv.relaxed' object than for a 'cv.glmnet' object. Note that a 'cv.relaxed' object inherits from class 'cv.glmnet', so by directly invoking \code{print.cv.glmnet(object)} will print the summary as if \code{relax=TRUE} had not been used. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = cv.glmnet(x, y) print(fit1) fit1r = cv.glmnet(x, y, relax = TRUE) print(fit1r) ## print.cv.glmnet(fit1r) ## CHECK WITH TREVOR } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent}\cr \url{https://arxiv.org/abs/1707.08692}\cr Hastie, T., Tibshirani, Robert, Tibshirani, Ryan (2019) \emph{Extended Comparisons of Best Subset Selection, Forward Stepwise Selection, and the Lasso} } \seealso{ \code{glmnet}, \code{predict} and \code{coef} methods. 
} \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/BinomialExample.Rd0000644000176200001440000000076014046314073015655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{BinomialExample} \alias{BinomialExample} \title{Synthetic dataset with binary response} \format{ List containing the following elements: \describe{ \item{x}{100 by 30 matrix of numeric values.} \item{y}{Numeric vector of length 100 containing 44 zeros and 56 ones.} } } \usage{ data(BinomialExample) } \description{ Randomly generated data for binomial regression example. } \keyword{data} glmnet/man/coxgrad.Rd0000644000176200001440000000356014013330131014222 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxgrad.R \name{coxgrad} \alias{coxgrad} \title{Compute gradient for Cox model} \usage{ coxgrad(eta, y, w, std.weights = TRUE, diag.hessian = FALSE) } \arguments{ \item{eta}{Fit vector (usually from glmnet at a particular lambda).} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{w}{Observation weights (default is all equal to 1).} \item{std.weights}{If TRUE (default), observation weights are standardized to sum to 1.} \item{diag.hessian}{If \code{TRUE}, compute the diagonal of the Hessian of the log partial likelihood as well. Default is \code{FALSE}.} } \value{ A single gradient vector the same length as \code{eta}. If \code{diag.hessian=TRUE}, the diagonal of the Hessian is included as an attribute "diag_hessian". } \description{ Compute the gradient of the log partial likelihood at a particular fit for Cox model. } \details{ Compute a gradient vector at the fitted vector for the log partial likelihood. 
This is like a residual vector, and useful for manual screening of predictors for \code{glmnet} in applications where \code{p} is very large (as in GWAS). Uses the Breslow approach to ties. This function is essentially a wrapper: it checks whether the response provided is right-censored or (start, stop] survival data, and calls the appropriate internal routine. } \examples{ set.seed(1) eta <- rnorm(10) time <- runif(10, min = 1, max = 10) d <- ifelse(rnorm(10) > 0, 1, 0) y <- survival::Surv(time, d) coxgrad(eta, y) # return diagonal of Hessian as well coxgrad(eta, y, diag.hessian = TRUE) # example with (start, stop] data y2 <- survival::Surv(time, time + runif(10), d) coxgrad(eta, y2) # example with strata y2 <- stratifySurv(y, rep(1:2, length.out = 10)) coxgrad(eta, y2) } \seealso{ \code{coxnet.deviance} } \keyword{Cox} \keyword{model} glmnet/man/MultinomialExample.Rd0000644000176200001440000000101614046314073016410 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{MultinomialExample} \alias{MultinomialExample} \title{Synthetic dataset with multinomial response} \format{ List containing the following elements: \describe{ \item{x}{500 by 30 matrix of numeric values.} \item{y}{Numeric vector of length 500 containing 142 ones, 174 twos and 184 threes.} } } \usage{ data(MultinomialExample) } \description{ Randomly generated data for multinomial regression example. } \keyword{data} glmnet/man/survfit.coxnet.Rd0000644000176200001440000000460113775432176015623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survfit.coxnet.R \name{survfit.coxnet} \alias{survfit.coxnet} \title{Compute a survival curve from a coxnet object} \usage{ \method{survfit}{coxnet}(formula, s = NULL, ...) } \arguments{ \item{formula}{A class \code{coxnet} object.} \item{s}{Value(s) of the penalty parameter lambda at which the survival curve is required. 
Default is the entire sequence used to create the model. However, it is recommended that \code{survfit.coxnet} is called for a single penalty parameter.} \item{...}{This is the mechanism for passing additional arguments like (i) x= and y= for the x and y used to fit the model, (ii) weights= and offset= when the model was fit with these options, (iii) arguments for new data (newx, newoffset, newstrata), and (iv) arguments to be passed to survfit.coxph().} } \value{ If \code{s} is a single value, an object of class "survfitcox" and "survfit" containing one or more survival curves. Otherwise, a list of such objects, one element for each value in \code{s}. Methods defined for survfit objects are print, summary and plot. } \description{ Computes the predicted survivor function for a Cox proportional hazards model with elastic net penalty. } \details{ To be consistent with other functions in \code{glmnet}, if \code{s} is not specified, survival curves are returned for the entire lambda sequence. This is not recommended usage: it is best to call \code{survfit.coxnet} with a single value of the penalty parameter for the \code{s} option. 
} \examples{ set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) xvec[sample.int(nobs * nvars, size = 0.4 * nobs * nvars)] <- 0 x <- matrix(xvec, nrow = nobs) beta <- rnorm(nvars / 3) fx <- x[, seq(nvars / 3)] \%*\% beta / 3 ty <- rexp(nobs, exp(fx)) tcens <- rbinom(n = nobs, prob = 0.3, size = 1) y <- survival::Surv(ty, tcens) fit1 <- glmnet(x, y, family = "cox") # survfit object for Cox model where lambda = 0.1 sf1 <- survival::survfit(fit1, s = 0.1, x = x, y = y) plot(sf1) # example with new data sf2 <- survival::survfit(fit1, s = 0.1, x = x, y = y, newx = x[1:3, ]) plot(sf2) # example with strata y2 <- stratifySurv(y, rep(1:2, length.out = nobs)) fit2 <- glmnet(x, y2, family = "cox") sf3 <- survival::survfit(fit2, s = 0.1, x = x, y = y2) sf4 <- survival::survfit(fit2, s = 0.1, x = x, y = y2, newx = x[1:3, ], newstrata = c(1, 1, 1)) } glmnet/man/cox_obj_function.Rd0000644000176200001440000000146513775432176016157 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{cox_obj_function} \alias{cox_obj_function} \title{Elastic net objective function value for Cox regression model} \usage{ cox_obj_function(y, pred, weights, lambda, alpha, coefficients, vp) } \arguments{ \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{pred}{Model's predictions for \code{y}.} \item{weights}{Observation weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{coefficients}{The model's coefficients.} \item{vp}{Penalty factors for each of the coefficients.} } \description{ Returns the elastic net objective function value for Cox regression model. 
} glmnet/man/get_start.Rd0000644000176200001440000000434113775432176014613 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{get_start} \alias{get_start} \title{Get null deviance, starting mu and lambda max} \usage{ get_start( x, y, weights, family, intercept, is.offset, offset, exclude, vp, alpha ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respectively. If it is not a sparse matrix, it is assumed to be standardized.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function. (See \code{\link[stats:family]{family}} for details on family functions.)} \item{intercept}{Does the model we are fitting have an intercept term or not?} \item{is.offset}{Is the model being fit with an offset or not?} \item{offset}{Offset for the model. If \code{is.offset=FALSE}, this should be a zero vector of the same length as \code{y}.} \item{exclude}{Indices of variables to be excluded from the model.} \item{vp}{Separate penalty factors can be applied to each coefficient.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} } \description{ Return the null deviance, starting mu and lambda max values for initialization. For internal use only. } \details{ This function is called by \code{glmnet.path} for null deviance, starting mu and lambda max values. It is also called by \code{glmnet.fit} when used without warmstart, but they only use the null deviance and starting mu values. When \code{x} is not sparse, it is expected to already be centered and scaled.
When \code{x} is sparse, the function will get its attributes \code{xm} and \code{xs} for its centering and scaling factors. Note that whether \code{x} is centered & scaled or not, the values of \code{mu} and \code{nulldev} don't change. However, the value of \code{lambda_max} does change, and we need \code{xm} and \code{xs} to get the correct value. } glmnet/man/mycoxph.Rd0000644000176200001440000000122013775432176014303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survfit.coxnet.R \name{mycoxph} \alias{mycoxph} \title{Helper function to fit coxph model for survfit.coxnet} \usage{ mycoxph(object, s, ...) } \arguments{ \item{object}{A class \code{coxnet} object.} \item{s}{The value of the penalty parameter lambda at which the survival curve is required.} \item{...}{The same ... that was passed to survfit.coxnet.} } \description{ This function constructs the coxph call needed to run the "hack" of coxph with 0 iterations. It's a separate function as we have to deal with function options like strata, offset and observation weights. } glmnet/man/assess.glmnet.Rd0000644000176200001440000001050414046050560015370 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/assess.glmnet.R, R/confusion.glmnet.R, % R/roc.glmnet.R \name{assess.glmnet} \alias{assess.glmnet} \alias{confusion.glmnet} \alias{roc.glmnet} \title{assess performance of a 'glmnet' object using test data.} \usage{ assess.glmnet( object, newx = NULL, newy, weights = NULL, family = c("gaussian", "binomial", "poisson", "multinomial", "cox", "mgaussian"), ... ) confusion.glmnet( object, newx = NULL, newy, family = c("binomial", "multinomial"), ... ) roc.glmnet(object, newx = NULL, newy, ...) } \arguments{ \item{object}{Fitted \code{"glmnet"} or \code{"cv.glmnet"}, \code{"relaxed"} or \code{"cv.relaxed"} object, OR a matrix of predictions (for \code{roc.glmnet} or \code{assess.glmnet}). 
For \code{roc.glmnet} the model must be a 'binomial', and for \code{confusion.glmnet} must be either 'binomial' or 'multinomial'} \item{newx}{If predictions are to be made, these are the 'x' values. Required for \code{confusion.glmnet}} \item{newy}{required argument for all functions; the new response values} \item{weights}{For observation weights for the test observations} \item{family}{The family of the model, in case predictions are passed in as 'object'} \item{...}{additional arguments to \code{predict.glmnet} when "object" is a "glmnet" fit, and predictions must be made to produce the statistics.} } \value{ \code{assess.glmnet} produces a list of vectors of measures. \code{roc.glmnet} a list of 'roc' two-column matrices, and \code{confusion.glmnet} a list of tables. If a single prediction is provided, or predictions are made from a CV object, the latter two drop the list status and produce a single matrix or table. } \description{ Given a test set, produce summary performance measures for the glmnet model(s) } \details{ \code{assess.glmnet} produces all the different performance measures provided by \code{cv.glmnet} for each of the families. A single vector, or a matrix of predictions can be provided, or fitted model objects or CV objects. In the case when the predictions are still to be made, the \code{...} arguments allow, for example, 'offsets' and other prediction parameters such as values for 'gamma' for 'relaxed' fits. \code{roc.glmnet} produces for a single vector a two column matrix with columns TPR and FPR (true positive rate and false positive rate). This object can be plotted to produce an ROC curve. If more than one prediction is called for, then a list of such matrices is produced. \code{confusion.glmnet} produces a confusion matrix tabulating the classification results. Again, a single table or a list, with a print method.
} \examples{ data(QuickStartExample) x <- QuickStartExample$x; y <- QuickStartExample$y set.seed(11) train = sample(seq(length(y)),70,replace=FALSE) fit1 = glmnet(x[train,], y[train]) assess.glmnet(fit1, newx = x[-train,], newy = y[-train]) preds = predict(fit1, newx = x[-train, ], s = c(1, 0.25)) assess.glmnet(preds, newy = y[-train], family = "gaussian") fit1c = cv.glmnet(x, y, keep = TRUE) fit1a = assess.glmnet(fit1c$fit.preval, newy=y,family="gaussian") plot(fit1c$lambda, log="x",fit1a$mae,xlab="Log Lambda",ylab="Mean Absolute Error") abline(v=fit1c$lambda.min, lty=2, col="red") data(BinomialExample) x <- BinomialExample$x; y <- BinomialExample$y fit2 = glmnet(x[train,], y[train], family = "binomial") assess.glmnet(fit2,newx = x[-train,], newy=y[-train], s=0.1) plot(roc.glmnet(fit2, newx = x[-train,], newy=y[-train])[[10]]) fit2c = cv.glmnet(x, y, family = "binomial", keep=TRUE) idmin = match(fit2c$lambda.min, fit2c$lambda) plot(roc.glmnet(fit2c$fit.preval, newy = y)[[idmin]]) data(MultinomialExample) x <- MultinomialExample$x; y <- MultinomialExample$y set.seed(103) train = sample(seq(length(y)),100,replace=FALSE) fit3 = glmnet(x[train,], y[train], family = "multinomial") confusion.glmnet(fit3, newx = x[-train, ], newy = y[-train], s = 0.01) fit3c = cv.glmnet(x, y, family = "multinomial", type.measure="class", keep=TRUE) idmin = match(fit3c$lambda.min, fit3c$lambda) confusion.glmnet(fit3c$fit.preval, newy = y, family="multinomial")[[idmin]] } \seealso{ \code{cv.glmnet}, \code{glmnet.measures} and \code{vignette("relax",package="glmnet")} } \author{ Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{classification} \keyword{models} glmnet/man/glmnet-internal.Rd0000644000176200001440000000354014211522451015701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet-package.R \name{glmnet-internal} \alias{glmnet-internal} \alias{auc} 
\alias{assess.coxnet} \alias{auc.mat} \alias{cvtype} \alias{cvstats} \alias{cvcompute} \alias{getcoef} \alias{getcoef.multinomial} \alias{fix.lam} \alias{error.bars} \alias{getmin} \alias{elnet} \alias{mrelnet} \alias{lognet} \alias{fishnet} \alias{coefnorm} \alias{coxnet} \alias{cv.lognet} \alias{cv.elnet} \alias{cv.multnet} \alias{cv.mrelnet} \alias{cv.coxnet} \alias{cv.fishnet} \alias{cv.glmnet.raw} \alias{cv.relaxed.raw} \alias{blend.relaxed} \alias{checkgamma.relax} \alias{buildPredmat} \alias{buildPredmat.mrelnetlist} \alias{buildPredmat.multnetlist} \alias{buildPredmat.lognetlist} \alias{buildPredmat.array} \alias{buildPredmat.coxnetlist} \alias{buildPredmat.default} \alias{lambda.interp} \alias{nonzeroCoef} \alias{glmnet_softmax} \alias{getOptcv.glmnet} \alias{getOptcv.relaxed} \alias{jerr} \alias{jerr.elnet} \alias{jerr.lognet} \alias{jerr.fishnet} \alias{jerr.coxnet} \alias{jerr.mrelnet} \alias{plotCoef} \alias{zeromat} \alias{na.mean} \alias{check_dots} \alias{na_sparse_fix} \alias{prepareX} \title{Internal glmnet functions} \description{ These are not intended for use by users. \code{lambda.interp} does linear interpolation of the lambdas to obtain a prediction at a new point s. \code{glmnet_softmax} does the classification for multinomial models. \code{nonzeroCoef} determines in an efficient manner which variables are nonzero in each fit. \code{jerr} prints out error messages from the C++ routines. \code{plotCoef} is called by the \code{plot} method for \code{glmnet} objects. \code{check_dots} is used in \code{coef} and \code{predict} with argument \code{exact=TRUE}, to make sure user supplies original data used to fit the \code{"glmnet"} object. 
} \author{ Trevor Hastie } \keyword{internal} glmnet/man/CoxExample.Rd0000644000176200001440000000121414046314073014647 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{CoxExample} \alias{CoxExample} \title{Synthetic dataset with right-censored survival response} \format{ List containing the following elements: \describe{ \item{x}{1,000 by 30 matrix of numeric values.} \item{y}{1,000 by 2 matrix with column names "time" and "status". The first column consists of positive numbers representing time to event, while the second column represents the status indicator (0=right-censored, 1=observed).} } } \usage{ data(CoxExample) } \description{ Randomly generated data for Cox regression example. } \keyword{data} glmnet/man/figures/0000755000176200001440000000000013623043702013757 5ustar liggesusersglmnet/man/figures/logo.png0000644000176200001440000007500013623043702015427 0ustar liggesusersPNG  IHDRYuaiCCPkCGColorSpaceDisplayP3(c``RI,(aa``+) rwRR` b >@% 0|/:%5I^bՋD0գd S JSl): ȞbC@$XMH3}VHHIBOGbCnₜJc%VhʢG`(*x%(30s 8, Ě30n~@\;b 'v$%)-rH @=iF`yF'{Vc``ww1Py!e5DeXIfMM*i&Š$oiTXtXML:com.adobe.xmp 960 540 1 ze@IDATx}E< 9 JPd`Ĝs`9lzݗaM1`NAD9Tu{z`f[0+:NS IA2#Dfƽи̬}/QhHb DAM"ٴDrUȓ'1JhRjw2M@ #)B؉LF/ߓ| ?8_+4;uAuEIBBa~`;2 d:'w\vpخ"kO2eng &q(m-.{\"?T8}!ujS ;o7 UrcgJuTiUA:͒KN&7@rs,YxzC M"rC>"so|(O[ 붔T#Pr2c&sRZ+זɍSߐo~ @B_d,٧S+2fhR{v'-mJE{AD>Q(t7A ^ R3 %?2Rwn& 8`bZ׻g_oV}e7h-A%DC =\{ҧgH{"VX>cS ;aLEl\'VBnjm͗g^Xx&k. 5{th!W\2Z na/Z8~< =ߔEeN'i"oP3vLҺ0T7ltBn&MGTn!!U2埏,R%Y@jXh7RnBQU~war$f*8L$$'q\xx<W*%+u\ĉHLoA5X.&B M@Z\ 70sJ'r93ώSLG`ti q38&T򸂦P°vuȤ˿}c,Zrde9ɔ[˵q87J_Dȴrm|\y} j']:ZXH,M$-'5u-zuIڐL)c#W?Fztm])/4Qlg徧ޕf,]fvCv-䒳)Cf;Yk l]w B+#tǪXޥΪćZ&OBM V !uzvk||yugJK n8sX E Gđspb*&dN+\*a#qlܸUyO43!',YenUNյ\b8|`+XIҳP9ܱj.6H뮮oJvl tb`4+}2r(釭 @H -UDA:c/;/Jp! 
hA0Ԗyri9&X"6xĂRwDl3?^ 7 0 ~hi׺ |c}e}Awf `7UEQfHn uG9@>okD'Zyᒺߺx?oקCҒJq?TK$5q >ϖKOs.tS+3ߔ fV\, chp@(qnfuos@< 0J= H4&BQx3;F^rlYr$gAs%e|åK]Ham 4KTUg]Euke]%NGJN9r)+J<*YŊhצAdu+5!`4>bFJ l1T=k !a5zarj3fn&?)J3і-,_:%2T;^5RN/pqՄMxG\SkM(bg۩V\\|R'epIZ1D9*]i[ WB>nI?@l#M'arYSu+l,Y;x8EN*.Xٯ[{2W4IEdB ?n(`1`a`fE'*:TC{Z&cFyK7kOCqj&Q)6n揿\ 897\qQnܐ^rE6HMM7_KC-#8'ޔ*&"BH 9<{äELS*lM(׬*<0_^g-IvvW%kW94῅خFZz87>JFwIŜ&v~H9ʥdMf7EʝM5<~1p0T66H 7H\6V~bsxrnrR2@ÁY'y3xˊu[>(5ѳ) 9iAXs&ڶWMuSa԰Fp:|y'ꢾ~dj uPdm;it_ҡNQ J 6[eRMT%v\סd3&cJ>)_e+kauk܀\_rRpm$HRZV!=6{WZk#Ef'sNJ:Rka! CNuiA@i4m"H`Ob3H%\w$ZHZTsG+谉jYlC\(U<0͹7DVƐ@o\ԍdAeD\ &.mpE1`}sմ'(gI !(: W|&7aOaG X3lvQ@wf昈Bjk7n|3NrG9`j] #N$A$9EoFz 'O chџkM?੆ۂxFP[" ]0>jMwΑ>Y @vPG \( ܾ8 ^=x??,X ]t}7}VUTʺ5kkPc Q~}C dc,} &n#&'߃M6-4TQn! I`R̓_T]Mad , _NJu5īZ`… SNҫW#oUVSO@@AWNNF%r7|?TNp)0,0P- e UO=n].U@q(mŒKȝ,"tV ` s!zd^&}ix/^D9E0T2+KcrYO:z4̑o}Sg;JգK+:Wiw]C6kG;$r"UՄF:a~r|:b ^rRǓӤm۶r@y!p 7Â{KߒN"E v(7E=͞L,Jno>–HH=Zd Qk^'#vc~h=Rb:lloKDzN94Lle|y)7۠$f<(s["m G㏰Էs^bg7o#Z4"Y{[:)?#(x\c-οbjb7N?fv zAqx; bsm0H4o-WӞZ(?LJ0Xfmps^a\[()gAe u Ɣ)+-}T^[PZ㯥TB8h3(ɁaʤbHE=h^7w-J׶FZfeI>*%UWHIzI@u„1dKQ)> c  }R<۴ʑ)8c<&Rw%p< q4Qhc(4( z֢{mNqVgU"AQ'qH|_!1 wZuSa> CA6 9"4\v|M݅IAFl d@iծ?H#dSv (4~|x=1&yN{G !!!ٷ$_(r E1Ж8e bgɠOa-2қ"icqi#2%ʁR Lp>Evd!rθphH8T~bxR[ qY+,c/yrk\ɆQ#JZvj#~Ggm]oc̞?TTQZA 6rueY!RGR䉙Q4(QϾ<,ڔu3vu03OcT\28v"tZy/>.^b$3xtJ+< .VhD+/=+ [K,j}I=BZPĈmF˸Ѣw3lU .7=0G^iH@ /ܕbU-3\'UۆC~]qul+>nOJ9#8J'u! 4 g }5DIpcpm*`_S%XZ@T*A q-S(SdA7*1:2 JYok-U S7e+E k/"#u;DQ"~ V?ItGUٳ3?o;VF@Lr9vg>=* ?x;DzU6gP@Z&:R1IZp@2ɭ?F< GcX>rLIjAː@{!m:u~?tL ?pMN`eό;v}ީ= `wuHB c3j2A OnՏ!|[|Ss YUiDm5kKIAZQ`Q!9^B BɒW·P>f{cg/@P,R Gd@|lso=RUv_l(1,Xk 1}e>z@ácv`|i Pu:!XR#dm-g/˳?rGJiq2G_e={ujdʦ%@ ۆ"cCC`Hs)^7!8kgcm\twy,k-e%rhܧ/. 
4t0*j -W%UXIQ .\V̟%]vFٶdhrԘe"X6uY$ ?(7Wf} /ҫp3NޤE~vkl?܆zQuJ8@ i Ér6W| (QQ؈tcHZA6ȝO >Z3t8r`rV u6.ak0a75Hieﹿ}Vk6 'gbA$"`%`^rҀ@A}tԫBRY>wat8* L8`Km 3 |H7dZ58`{W[q+>p)ۻ.9.sRBЁbMFD!j؛nzA>W@6@~ZfuT `Y> ##݊jW{fvkrQ94º)=Dϩm9`9/t1'PV |>;?&@(b W:`¾ XeUR.!ah}_t@5zO]EvsN` OV#ΈU ~Ĕp5e]%`$5YGпXE1>w \1eHwGqEEE2'eλoi15unQ si݂Dd $ ` dbK%C!,Y@$Vdo-?;9IE'p b?F~|# yKc xk2ң{Gtq Q<f`웅lx L>T.8ehؽ,&n{_yZ潻DW!3q?ԫt(yp%J,ap8vHK")+)K\_@7u;JklZb @Z\D *"k:*hVM qqgdt˔JƟ$ HG՘Fluҧg~:ȱpDaDR#3^pv98bfGvk+Xzl>ot>Y[A K"Sderf&";3@ $6P̗(3BQ42xW|Eʶl,Vx.𑀵)CMOK`۪Qwuu6883+)gmUGD41R8}`wޜrY\{N 6)DAR +DB~Ct`?I*T |8(t?fi/` Y2aRtGvE)a"/pDLL9Ng,_}Uѐ: D+9VKT)p :} Yp lFﳯ`ҫo_~p!#bDc->4LOAnՉrd>lDRPä[6/]v؃ccލZ)Js';!y|*a䡨ΞL@esyebj0{[,}VQ }Bfk^X2[rOK#9kA~(R֯]'8l26#EK)sbm#*]ݻH]YE4&:fh3G}}7Ig,dG){d`&Cv72v1NFe+=6l3g}ab%$猄`0!`y2E67X^ie)H!qEE8>CXvUf @YP4udiM5WňlʘF6:M:FjU#3Xa {)6I {2XyۼV' 赚M@f .08FBO2EG%M%r[7UZDhw$p\נ`#|ѵyAdT)g1:>(b t;Ҫ]"/Q G2ӠPu 7 3il~z B?Dj/ >tǐNNM&Ӵ '0$ g ^ PPWW!J#WWn"Jsm ,hS(%%h  e3"H࿷z]aeN?a?.YpfL$0{y̰,y )Z8SY%u hV|l׆:waHwd.I)> ^p@[ ~ q6;[XMSƌl=^"/efl,ʱO[ri 4izPW5fç!ϾM1iJ[c>R“,~0D\30d\7:tEQ!q4ώ$9Y0~,PDb$ 9M{ޕ}ST"Ƭ0q0b/LX,Elb Rs >g??H,|#5wg6v?`Ya*aY *$!\W ' ޸12wW噅+f 3Ek^tgoS0c"jɎ/e 2[ ;3_O.AIUarGZ*`ʫ[W+\kLrFaĞs Q^_68߼Ip SCv6N(B죰3JSWQayZ$Lzr7~M_ L{ͽCxu83wgw6o,AxylJWdz.v32% @xrL, oڽhSנr\i-/))~;\6dKg}\vҷ1k)IL>|%+qR,@`#)6C<3wd laH;G@d`Ŧ\l8yY)}q6S>u$Hr3j_Psm0b8@YO, ڬC% (}~dS'^sbvmF8=uּ$fuD@"bTd}>vj33M@L\eE% 'okhy"]IaSQ7ܼ殬D0 a}9vdOEq}Klrw6n~C DDA788OݸAE!Ss%Pvu}(6>sez"}јȧL 2gxzh\ZP$)S*)}CgAejB/0x0sU=6HKRT(+9bÁc0C=l栿 ҄Wp,Dr5F@YMu+oܲ נT̩T|I Fx fj`RNٜy%pUGn bka$0Sl:uq:k[T ۴N CZ3=WyQ4ҋzB/P-us e $j?6UF!G"!NN7bc(xb\tS R[87ՅT%)i;@2Ajb4D|IqyΒrGS3.dŶUFj!tKP,ug%L2T@He=:Cܪ28ؠ\52cc< ͑S'U?CH@8XY^6«O<ƭۼ^f{M^Q dxE2Ξl|2p6WY+K9Y)"@TWp*g (25'ȯ&~Wu\&eAb`l:49h$ "$1Ӷ!P6˂Ar@LDz4Px+51AڊDA)F7LLkQjm ]g̙)/n'0Cb*P#QYP p B!/JɜAt@d!f9xIu_!' @ hDE1 D_%HAZ-9b$rjɲī\d/,_z>lќ~ ~jq E7= e3pmx݅CGH.@B` ('[(<Tn%R8:e.Pd/кBY[TlR=: rS ,Dnjm!|R&ȄS܃J:t\l/xԢCfq'$Ⓧ}4!McQ]d_}N^*_}s 9[+P!=,E58(G7[q֏ ?zeA! 
%?|׬\Moy$!q0MVkO#^S_~~- 2Y{>I%&]tr2Z{ĒI4, UhB?nImQ8fiO68XѣtA͔l* x*F4Xx@/0Wy6\=!9tE1.W!}GA: p#Uʹ293CRVmqpyѬU i+y6":RͲ(BrEh d0!rv{Ta(?!TT)#e_B0'Y-RkqJhaH$a}n8_ m9Ar'a@S)@\5t]@gqF'G_S#.. W"Ը#6&AD`e!3T٘Gge&  Qy/+1'yƈSM]8GY8V%Y|U@1!'y*_}AVmZ ;y[da/@S59 648#ߺ6D#BiJS](GL >E ʺAh583U]w >Hdǟ3q!)3L.#rd+>™̋"4WH@:tP=atD> SVR.hG>Sg%,+mǟ&ǀKDoQpi# 7()ZLԔ8u>\|ر ycŚ y߸zo"-a|UV,~GL֓\v*DZaNR |Z,TK1hHh(Qgt?dR[=c];9% W 8},W*!k/y%Pg fr>YHYȦTA@rX[`'&j,UJ p ܉hrIXr] Ā;Dۍ7/r{_[2f5ZL/pqA`YРGϓI#ceSW+ -F<,T `̇A*+.*G4{a(RCa) f4[r,93袢 T~"UY):1X0z= >' @ G槂Z'@/ 2|`싥]ÈxEǹPꊾCFtl"4SpXRoXM?q'/wsf墜WKʊtun(VєWzghSQǷ˰ߺeIDUѝ5 :,)9fj!2TG / _+Ze!U4"?'o2GȔτ+ g7@% 9&9p+0|"_E6C&B#$hC)(I+J9S.k9tWל|}G D$͊IvlK*C'(Yts.(V\.8Qxwn8YP(`ik(sKl|O=zϙDo4C ]N^b'_",#n~0GO N] #KP_|FB1v!99C:%%j/"CghuudBi•&P*jd!OK6Z(&1EGE#?˔^ L>C}Ħs]bՇ)}N49ɷM\u3'A0X*$}{( D hZq@l<f۶vo" 7+hxi# שHmw\8U}\6lu'8 Qf$Tb]زnt޿:r) 6Mh||&rDFaIG2GeCge 3jlsvzcaV vai: e`#^@~)PO< SyF@Mbt|~+# &8?G!w5%zL#hNm ȺoWjaĤ' .'56`0F9:A% lY8aĺ恄دlG}?)۽%z&$uKN>[NwytXH; q#-D|0 ehEHv҄+C?5IDAT-L@Vl\f@<(BQ/ёP=N"& RLL''U +Q3;E) t y@ZM$N:ʕg](̙ҬC${CDÂ?s^wMAd'Mkp T1M%$ y3fai C utM~zeҷG0_& "2IsC}5!E|ѣP៏L|}裤"JCN}5YΜt8,*jA 7 ׎,D3g (M>Fnn|Wgjl’8L9'MF$-@1!H'|f\QS&W $N_(⨇y^{-אyjQ:"'BȃSo~0..+ml: "ZC{P.Si13۶sryȰ=<{oo>o(qKwN^n" 9\Wzߝv0-@N!kO@bM 8J̸T6BGʔϐ68iib%1n}!۟)+i5!+#>S4^A52daK#tN pT1Nf"hNB< RYO¢EҦ?6OZĕ&TňO>o Fv½1VwYVD"?ҳkDP! r Lz NQ`*9b9ECX&?ťD&! 
~v5ҧkOJw={C$λ}=,L'[Jvn>Ҥp$\{[00$r9̙IɃ:3eptW~xUzoŧ\hҡ}'ߓAIVʼ)ALw I$1[HfL:R~=xxb__~t8s]8~Ka8\Z ϕndDBTXTa>}} t~/tiӖ^%0u!!;C,d[ܚHthX lx\swφ &W4~iUXB8ɚ,d \15i)Tդ韆/g} =SCupVlr}B1VkQT`-Z:4lʂ}JY HG ,w o [d\jfJگ&K]b003-Xw"lUCXjG`-Ĉ } 5o1Ga0!8 Cf7/y/}c`ȍ^n?25+_ xVl˖ծ Ik->nݺ 6 #l.L4B{f!*9Zd~a),,m߀DaGEh0%0T~vo=я]s;k73;cۦC4{)ύ塩s uO9&X[6vGZ8E_~9rC]Zclk]m"2QW]U t1ڡ®>Y\S.[L6S]~Ə(䷱Q6:3cQFɏ~zصIQ$ ?Cs?G&M|[㏇oQŷ/M;%kW# #>XŞ,kӞn˷c=R44) vCwh6u7T XĞD]JJ{l3ꚹk%lL!Qܡd-&4~g+ "Ky7@]+:k?wqᣥݓ6$OU>~4 קv2'l A?1 ׯ_/mڴsX[캝Oו7&\WeqF}ꩧR駟*,?p+bT| /x J~@uS1{Zߞ(>K+y[N:Z2.]R4uN:\wuҾ}{~ /4vDEk4]Xn:mhmյ6D峕O˵^+y(zK/mW1cFJޑ#Gʒ%Kd͚5vZYnn2d\z饚~GVRdٞO=.Nq-nI=z^$)GS^M89} /͖rks 'JÈC=4LMkh:]VJ?L $N=0_3nONYgزe<=:$XW_}5L0͛ .Luc|Xgx_2eJUVa~+갸 6E5뷆8uDq ϻ-1pukc _uϏd{s9a^o~Zi)0}il`ޑ.|ͬYqJ0-oQQQL dye;myq%q+m%f~֗Ԩ!*m} eZpKkzu$Y+%~ʕ)Ǝ+qDzY\X(4Y;(,6K端t,!QhU3zr\|űeEn)sUjŊfc=_~>g/OW^r)}' qYw.$ Yh]ίl">r_l:a?)x YƍCYg_",.1 %\t>LY#3?gf?Кęݵ9t~`{j2w59nb`իWyC+s=C q#!CiiiHLӵkW믿.o´׿UV Srsg>'X w)V9Ͽ˿,Lcm{SWǛiuۙxkS4"qX;k}~TH< `Μ9a:( |OʇlScǎGμ{ʙ˙:u@w(LX~E,2av~ Q8-%|1lʰq+-m-pQtX lGn݄AC=~~GRh>;/E4 EB>?pjtA)0Y߀Rkgz7c?? .H!N]i8R :tWD0roFp6m~O~L6ѾhٖAѪ@QuY}tוgw[u T? -gyzZ=ddqq$!u ∂uSBgxÇ tu؇*sΡ(L?pח9_0;;#lE=X`r~? pK,u&a(wy% ஻ a={v:>јB}T-$,ƿn[۠JgX l0g]g?p ^y5ʈ*|Iz6K"; >[X>׷*KPAeQBbYs^ҐM0u:ea`.lX>ϨAN6qD!w\fₕu5ˍrE{w{&lCE;;Oahfd`kܸqg;'珕AW {E5Da23831COo{}1Du}*r_c ƊL 裏j9|sVb>4x0k2 XhQNH?3m-~h}͠'[ƻE6ͻ˞ yUX@^O c=lq>@Qry,5V>fMkX',X5*nQhrZ*:i1c`]khj]r.H>mvOpkyS9aGwNlYh>ŧwȡgfќ)}jE9]x~`@1DN=#mJgE>N;s]m#n8@zƲCQ^o˼a(П7)/Vc-׮]2leXZ^k<"GgA"h*/ cQ0W)^0=w?=x3eGO?OzA"m=?/Ҕ:Fxj$[1HOIy»tWfNE-ӮҚUQ.ʙՄ+q~:) ?+Z]*,98x~W^O3y?~^HF*Dk'=䓚b!6Xm^ `h%bqK?3IO/2-%t'뇕G"#X;=(C,ϰ,_C^e&U $- ;9˰OV2]tmr8,fR6 )+|.mֻvW߮ \S!Rmoȕ~L~#;('L+Lyy''}po/mAi)Lb3%L[ #!AO <$W灵p,YWhWf +.!]:LStBXEWzrǂ. 
q>=9Ap+ {bȔ3I^B`rK䁧V yؖw)I}%QeoXl-z{\$̘ipn:qb?j9k2_=(rO#0$ FʑhO(VRP>Z-w|B v/-Ŧ huy)ӷi!H"<kH#.<ŷ厇ޔ %W쥴rrIy'؄ź:$$QȎXJyq3 |hlؼYZ)k\r‘˔I6XNtֽOH( WpyѲΒ_kjr #Ayˆrcs|`*U_GA4P($WM#* UCA-eD@`+ϖ)ӏ$`c^GX6;8}Aw \@^Xny`6oy  F8cݳPdzpwizRX Ӱry2/US&6pn@cdKv7ivk qRQU)Ӟ\7x)|R.Zȓ)g SZ$7Ak]C-`?qRJ fѲשaaĎ}AZ~ݓIL:J~ö֕ L !afhힹjV:9 aTd-2yr9#ua;6FM"D2MJ .EI5Lc9 ċ]ɖbwj e! QiGKN&-qE1J tBHRˣt!eĢVcڴ !ʰޘBEx_nP6aBMlg0@.>ua5:RWlbӠDa"֫ ?[/J`Ȯ2Py2zHwXPAkwlH:}m!{xuI%2`Αl._XkTieq΁|ǙXBy24N  ( vuX/+.J (=$)-=*{O9՛jbzy kO|On{hlZM;F֥0>iUnJHn^sr)C$.TL)jY܁)܌Mr=e۫ hvH?P OVzG/&7JQWŦC@Mm3 %kI5l6l(#/$npC\2nFLmeVZ$b?vHf~'_.E%7W?P.9uH`ϓ'l<5OȫO.ĕIR*,̓s8@UҦv<#kjX' 83vZcϾ %Kx45KqyOF@;Ͼ\cʒh#0S?@l'cdpѭG#H*MJI%nHV/7dҕB, CIpprDCಳ{FI ?.EAz6Y9qx/ҍ]4 c%i=+h6'aA*7gJU%`-l!8N;k0 OjZjD5\X]Z"m-9 nN˖}`lJq`ģV!Gz֦} noLFu<ڞӥ{WcEuV>vq>s ZրHDm44j4mŴ("4T+ae BG FΙ;sgee+Û ;3oνw=s=\: d$vW2_9(8 JŁ3gzy xO0v9U,a(҅xnP~#? SX~QўofF;n\3*X w#U" t?/[_N!5Di@88֦/Iǡ<ǁ˽qiLUf6WCA2:>׎?7! 8 0IEV]ύ@cFʒTG2 \#DQ|SLC ž9]]dhnj I*~_^kzs;:cw1ۆ-RpZ"7 εvJnN_ܰ{tӝ {yy&o~Dݍdq̄D69TGΨ{L{ёPDf7ӔUM)λW^ O9v ]yȊ;fȯVeԨĦqD~k~Sz%@'sՍmL'\vTybM+O'[c?"0H*rM_<is? 
rC\>C^yn!S`,OvX8|]DA@9>{ʤ/g^{cYZ;BEяTMV9e-RhM!FzW@/}\|m {ރ%򛈤nbgMCL_ņ ovnoSz"oK4:{GÙmܺb"2ymn&\=rvuΞGF`T== cz^ s'ixrky8x%&zˊ"+v&8x4o*fMpH*az ϨQۀ9 p?ybFaC?0SG ]*ZLzvűj{lێC\ݧDDK>8ȣkȸ#cȩya1 4a^s?9܋osRq4?]=g4f%\km:Q ^07?V/RфehtݳiPL>%ڪ~{vcɺK}>pn8gMGoŚ/(!&p_:/iIW_CiO!4F F8r:p`״H=l!hFgp#Ъ.`'?oYrmy\( <@ip!@L, 0 ~rTf+Dԧ&W/jyhThjQXk@-jĐ x ی(G͒m"\p]A䪥n a:W/,`f$;(| }B@hpε?KNaC8=.-ty|sQ;X"F x9|WueH\\&a45"(- eAa ҕ+"q\!=8LIC0pQ34'kWΕ{JhD^&i{!ʎ Ȁ7myGeL.(IJ\<dP ](kuVbDX7m'/*!!J45p c?'G Xf]p7lqviȈbH`-gt7VsK<ϓ');hJ0WQ؁΁McjR5JA-#RP{QԴNEƑ1;mWxy}B%L7  %`Y*?2(3L;/-6 %l-E$j  qu窹nNJ;Y*'2('4YVUh /{6ă@E ܪ!Ǒ}ik̺K}]q?VA #xPba.YPWOU-ە#<jyXC@FNe-)Dr\!a-?Mi^IENDB`glmnet/man/use.cox.path.Rd0000644000176200001440000000127614211506125015124 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{use.cox.path} \alias{use.cox.path} \title{Check if glmnet should call cox.path} \usage{ use.cox.path(x, y) } \arguments{ \item{x}{Design matrix.} \item{y}{Response variable.} } \value{ TRUE if cox.path() should be called, FALSE otherwise. } \description{ Helper function to check if glmnet() should call cox.path(). } \details{ For \code{family="cox"}, we only call the original coxnet() function if (i) x is not sparse, (ii) y is right-censored data, and (iii) we are not fitting a stratified Cox model. This function also throws an error if y has a "strata" attribute but is not of type "stratifySurv". } glmnet/man/get_eta.Rd0000644000176200001440000000151513752553007014223 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{get_eta} \alias{get_eta} \title{Helper function to get etas (linear predictions)} \usage{ get_eta(x, beta, a0) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. 
It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed to be standardized.} \item{beta}{Feature coefficients.} \item{a0}{Intercept.} } \description{ Given x, coefficients and intercept, return linear predictions. Wrapper that works with both regular and sparse x. Only works for single set of coefficients and intercept. } glmnet/man/response.coxnet.Rd0000644000176200001440000000133213775432176015755 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/response.coxnet.R \name{response.coxnet} \alias{response.coxnet} \title{Make response for coxnet} \usage{ response.coxnet(y) } \arguments{ \item{y}{Response variable. Either a class "Surv" object or a two-column matrix with columns named 'time' and 'status'.} } \value{ A class "Surv" object. } \description{ Internal function to make the response y passed to glmnet suitable for coxnet (i.e. glmnet with family = "cox"). Sanity checks are performed here too. } \details{ If y is a class "Surv" object, this function returns y with no changes. If y is a two-column matrix with columns named 'time' and 'status', it is converted into a "Surv" object. } glmnet/man/na.replace.Rd0000644000176200001440000000304013553366407014623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makeX.R \name{na.replace} \alias{na.replace} \title{Replace the missing entries in a matrix columnwise with the entries in a supplied vector} \usage{ na.replace(x, m = rowSums(x, na.rm = TRUE)) } \arguments{ \item{x}{A matrix with potentially missing values, and also potentially in sparse matrix format (i.e. inherits from "sparseMatrix")} \item{m}{Optional argument. A vector of values used to replace the missing entries, columnwise. 
If missing, the column means of 'x' are used} } \value{ A version of 'x' is returned with the missing values replaced. } \description{ Missing entries in any given column of the matrix are replaced by the column means or the values in a supplied vector. } \details{ This is a simple imputation scheme. This function is called by \code{makeX} if the \code{na.impute=TRUE} option is used, but of course can be used on its own. If 'x' is sparse, the result is sparse, and the replacements are done so as to maintain sparsity. } \examples{ set.seed(101) ### Single data frame X = matrix(rnorm(20), 10, 2) X[3, 1] = NA X[5, 2] = NA X3 = sample(letters[1:3], 10, replace = TRUE) X3[6] = NA X4 = sample(LETTERS[1:3], 10, replace = TRUE) X4[9] = NA dfn = data.frame(X, X3, X4) x = makeX(dfn) m = rowSums(x, na.rm = TRUE) na.replace(x, m) x = makeX(dfn, sparse = TRUE) na.replace(x, m) } \seealso{ \code{makeX} and \code{glmnet} } \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} glmnet/man/pen_function.Rd0000644000176200001440000000127213752553007015302 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{pen_function} \alias{pen_function} \title{Elastic net penalty value} \usage{ pen_function(coefficients, alpha = 1, vp = 1) } \arguments{ \item{coefficients}{The model's coefficients (excluding intercept).} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{vp}{Penalty factors for each of the coefficients.} } \description{ Returns the elastic net penalty value without the \code{lambda} factor. } \details{ The penalty is defined as \deqn{(1-\alpha)/2 \sum vp_j \beta_j^2 + \alpha \sum vp_j |\beta|.} Note the omission of the multiplicative \code{lambda} factor. 
} glmnet/man/Cindex.Rd0000644000176200001440000000276113752553007014031 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Cindex.R \name{Cindex} \alias{Cindex} \title{compute C index for a Cox model} \usage{ Cindex(pred, y, weights = rep(1, nrow(y))) } \arguments{ \item{pred}{Predictions from a \code{"coxnet"} object} \item{y}{a survival response object - a matrix with two columns "time" and "status"; see documentation for "glmnet"} \item{weights}{optional observation weights} } \description{ Computes Harrel's C index for predictions from a \code{"coxnet"} object. } \details{ Computes the concordance index, taking into account censoring. } \examples{ set.seed(10101) N = 1000 p = 30 nzc = p/3 x = matrix(rnorm(N * p), N, p) beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta/3 hx = exp(fx) ty = rexp(N, hx) tcens = rbinom(n = N, prob = 0.3, size = 1) # censoring indicator y = cbind(time = ty, status = 1 - tcens) # y=Surv(ty,1-tcens) with library(survival) fit = glmnet(x, y, family = "cox") pred = predict(fit, newx = x) apply(pred, 2, Cindex, y=y) cv.glmnet(x, y, family = "cox", type.measure = "C") } \references{ Harrel Jr, F. E. and Lee, K. L. and Mark, D. B. (1996) \emph{Tutorial in biostatistics: multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing error}, Statistics in Medicine, 15, pages 361--387. 
} \seealso{ \code{cv.glmnet} } \author{ Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{Cox} \keyword{cross-validation} \keyword{models} glmnet/man/coxnet.deviance.Rd0000644000176200001440000000464014013330131015650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxnet.deviance.R \name{coxnet.deviance} \alias{coxnet.deviance} \title{Compute deviance for Cox model} \usage{ coxnet.deviance( pred = NULL, y, x = NULL, offset = NULL, weights = NULL, std.weights = TRUE, beta = NULL ) } \arguments{ \item{pred}{Fit vector or matrix (usually from glmnet at a particular lambda or a sequence of lambdas).} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{x}{Optional \code{x} matrix, to be supplied if \code{pred = NULL}.} \item{offset}{Optional offset vector.} \item{weights}{Observation weights (default is all equal to 1).} \item{std.weights}{If TRUE (default), observation weights are standardized to sum to 1.} \item{beta}{Optional coefficient vector/matrix, to be supplied if \code{pred = NULL}.} } \value{ A vector of deviances, one for each column of predictions. } \description{ Compute the deviance (-2 log partial likelihood) for Cox model. } \details{ Computes the deviance for a single set of predictions, or for a matrix of predictions. The user can either supply the predictions directly through the \code{pred} option, or by supplying the \code{x} matrix and \code{beta} coefficients. Uses the Breslow approach to ties. The function first checks if \code{pred} is passed: if so, it is used as the predictions. If \code{pred} is not passed but \code{x} and \code{beta} are passed, then these values are used to compute the predictions. If neither \code{x} nor \code{beta} are passed, then the predictions are all taken to be 0. 
\code{coxnet.deviance()} is a wrapper: it calls the appropriate internal routine based on whether the response is right-censored data or (start, stop] survival data. } \examples{ set.seed(1) eta <- rnorm(10) time <- runif(10, min = 1, max = 10) d <- ifelse(rnorm(10) > 0, 1, 0) y <- survival::Surv(time, d) coxnet.deviance(pred = eta, y = y) # if pred not provided, it is set to zero vector coxnet.deviance(y = y) # example with x and beta x <- matrix(rnorm(10 * 3), nrow = 10) beta <- matrix(1:3, ncol = 1) coxnet.deviance(y = y, x = x, beta = beta) # example with (start, stop] data y2 <- survival::Surv(time, time + runif(10), d) coxnet.deviance(pred = eta, y = y2) # example with strata y2 <- stratifySurv(y, rep(1:2, length.out = 10)) coxnet.deviance(pred = eta, y = y2) } \seealso{ \code{coxgrad} } \keyword{Cox} \keyword{model} glmnet/man/predict.glmnetfit.Rd0000644000176200001440000000474313775432176016253 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{predict.glmnetfit} \alias{predict.glmnetfit} \title{Get predictions from a \code{glmnetfit} fit object} \usage{ \method{predict}{glmnetfit}( object, newx, s = NULL, type = c("link", "response", "coefficients", "nonzero"), exact = FALSE, newoffset, ... ) } \arguments{ \item{object}{Fitted "glmnetfit" object.} \item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix. This argument is not used for \code{type = c("coefficients","nonzero")}.} \item{s}{Value(s) of the penalty parameter lambda at which predictions are required. Default is the entire sequence used to create the model.} \item{type}{Type of prediction required. Type "link" gives the linear predictors (eta scale); Type "response" gives the fitted values (mu scale). Type "coefficients" computes the coefficients at the requested values for s. 
Type "nonzero" returns a list of the indices of the nonzero coefficients for each value of s.} \item{exact}{This argument is relevant only when predictions are made at values of \code{s} (lambda) \emph{different} from those used in the fitting of the original model. If \code{exact=FALSE} (default), then the predict function uses linear interpolation to make predictions for values of \code{s} (lambda) that do not coincide with those used in the fitting algorithm. While this is often a good approximation, it can sometimes be a bit coarse. With \code{exact=TRUE}, these different values of \code{s} are merged (and sorted) with \code{object$lambda}, and the model is refit before predictions are made. In this case, it is required to supply the original data x= and y= as additional named arguments to predict() or coef(). The workhorse \code{predict.glmnet()} needs to update the model, and so needs the data used to create it. The same is true of weights, offset, penalty.factor, lower.limits, upper.limits if these were used in the original call. Failure to do so will result in an error.} \item{newoffset}{If an offset is used in the fit, then one must be supplied for making predictions (except for type="coefficients" or type="nonzero").} \item{...}{This is the mechanism for passing arguments like \code{x=} when \code{exact=TRUE}; see \code{exact} argument.} } \value{ The object returned depends on type. } \description{ Gives fitted values, linear predictors, coefficients and number of non-zero coefficients from a fitted \code{glmnetfit} object. } glmnet/man/SparseExample.Rd0000644000176200001440000000107614046314073015361 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{SparseExample} \alias{SparseExample} \title{Synthetic dataset with sparse design matrix} \format{ List containing the following elements: \describe{ \item{x}{100 by 20 matrix of numeric values. 
x is in sparse matrix format, having class "dgCMatrix".} \item{y}{Numeric vector of length 100.} } } \usage{ data(SparseExample) } \description{ Randomly generated data for Gaussian regression example with the design matrix x being in sparse matrix format. } \keyword{data} glmnet/man/glmnet.fit.Rd0000644000176200001440000001505114211522451014650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{glmnet.fit} \alias{glmnet.fit} \title{Fit a GLM with elastic net regularization for a single value of lambda} \usage{ glmnet.fit( x, y, weights, lambda, alpha = 1, offset = rep(0, nobs), family = gaussian(), intercept = TRUE, thresh = 1e-10, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = c(), lower.limits = -Inf, upper.limits = Inf, warm = NULL, from.glmnet.path = FALSE, save.fit = FALSE, trace.it = 0 ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed that any standardization needed has already been done.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights. \code{glmnet.fit} does NOT standardize these weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{offset}{A vector of length \code{nobs} that is included in the linear predictor. Useful for the "poisson" family (e.g. log of exposure time), or for refining a model by starting at a current fit. Default is NULL. 
If supplied, then values must also be supplied to the \code{predict} function.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function. Default is \code{gaussian()}. (See \code{\link[stats:family]{family}} for details on family functions.)} \item{intercept}{Should intercept be fitted (default=TRUE) or set to zero (FALSE)?} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}. (If a warm start object is provided, the number of passes the warm start object performed is included.)} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to \code{nvars}.} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}.} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}.} \item{warm}{Either a \code{glmnetfit} object or a list (with names \code{beta} and \code{a0} containing coefficients and intercept respectively) which can be used as a warm start. Default is \code{NULL}, indicating no warm start. 
For internal use only.} \item{from.glmnet.path}{Was \code{glmnet.fit()} called from \code{glmnet.path()}? Default is FALSE.This has implications for computation of the penalty factors.} \item{save.fit}{Return the warm start object? Default is FALSE.} \item{trace.it}{Controls how much information is printed to screen. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted. Default is \code{trace.it=0} (no information printed). (\code{trace.it=1} not used for compatibility with \code{glmnet.path}.)} } \value{ An object with class "glmnetfit" and "glmnet". The list returned contains more keys than that of a "glmnet" object. \item{a0}{Intercept value.} \item{beta}{A \code{nvars x 1} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{Lambda value used.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the intercept model.} \item{npasses}{Total passes over the data.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} \item{warm_fit}{If \code{save.fit=TRUE}, output of C++ routine, used for warm starts. 
For internal use only.} \item{family}{Family used for the model.} \item{converged}{A logical variable: was the algorithm judged to have converged?} \item{boundary}{A logical variable: is the fitted value on the boundary of the attainable values?} \item{obj_function}{Objective function value at the solution.} } \description{ Fit a generalized linear model via penalized maximum likelihood for a single value of lambda. Can deal with any GLM family. } \details{ WARNING: Users should not call \code{glmnet.fit} directly. Higher-level functions in this package call \code{glmnet.fit} as a subroutine. If a warm start object is provided, some of the other arguments in the function may be overriden. \code{glmnet.fit} solves the elastic net problem for a single, user-specified value of lambda. \code{glmnet.fit} works for any GLM family. It solves the problem using iteratively reweighted least squares (IRLS). For each IRLS iteration, \code{glmnet.fit} makes a quadratic (Newton) approximation of the log-likelihood, then calls \code{elnet.fit} to minimize the resulting approximation. In terms of standardization: \code{glmnet.fit} does not standardize \code{x} and \code{weights}. \code{penalty.factor} is standardized so that they sum up to \code{nvars}. } glmnet/man/QuickStartExample.Rd0000644000176200001440000000073014046314073016212 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{QuickStartExample} \alias{QuickStartExample} \title{Synthetic dataset with Gaussian response} \format{ List containing the following elements: \describe{ \item{x}{100 by 20 matrix of numeric values.} \item{y}{Numeric vector of length 100.} } } \usage{ data(QuickStartExample) } \description{ Randomly generated data for Gaussian regression example. 
} \keyword{data} glmnet/man/weighted_mean_sd.Rd0000644000176200001440000000120114013330131016047 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{weighted_mean_sd} \alias{weighted_mean_sd} \title{Helper function to compute weighted mean and standard deviation} \usage{ weighted_mean_sd(x, weights = rep(1, nrow(x))) } \arguments{ \item{x}{Observation matrix.} \item{weights}{Optional weight vector.} } \value{ A list with components. \item{mean}{vector of weighted means of columns of x} \item{sd}{vector of weighted standard deviations of columns of x} } \description{ Helper function to compute weighted mean and standard deviation. Deals gracefully whether x is sparse matrix or not. } glmnet/man/MultiGaussianExample.Rd0000644000176200001440000000105514046314073016706 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{MultiGaussianExample} \alias{MultiGaussianExample} \title{Synthetic dataset with multiple Gaussian responses} \format{ List containing the following elements: \describe{ \item{x}{100 by 20 matrix of numeric values.} \item{y}{100 by 4 matrix of numeric values, each column representing one response vector.} } } \usage{ data(MultiGaussianExample) } \description{ Randomly generated data for multi-response Gaussian regression example. } \keyword{data} glmnet/man/dev_function.Rd0000644000176200001440000000106013752553007015271 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{dev_function} \alias{dev_function} \title{Elastic net deviance value} \usage{ dev_function(y, mu, weights, family) } \arguments{ \item{y}{Quantitative response variable.} \item{mu}{Model's predictions for \code{y}.} \item{weights}{Observation weights.} \item{family}{A description of the error distribution and link function to be used in the model. 
This is the result of a call to a family function.} } \description{ Returns the elastic net deviance value. } glmnet/DESCRIPTION0000644000176200001440000000403414471023421013245 0ustar liggesusersPackage: glmnet Type: Package Title: Lasso and Elastic-Net Regularized Generalized Linear Models Version: 4.1-8 Date: 2023-08-19 Authors@R: c(person("Jerome", "Friedman", role=c("aut")), person("Trevor", "Hastie", role=c("aut", "cre"), email = "hastie@stanford.edu"), person("Rob", "Tibshirani", role=c("aut")), person("Balasubramanian", "Narasimhan", role=c("aut")), person("Kenneth","Tay",role=c("aut")), person("Noah", "Simon", role=c("aut")), person("Junyang", "Qian", role=c("ctb")), person("James", "Yang", role=c("aut"))) Depends: R (>= 3.6.0), Matrix (>= 1.0-6) Imports: methods, utils, foreach, shape, survival, Rcpp Suggests: knitr, lars, testthat, xfun, rmarkdown SystemRequirements: C++17 Description: Extremely efficient procedures for fitting the entire lasso or elastic-net regularization path for linear regression, logistic and multinomial regression models, Poisson regression, Cox model, multiple-response Gaussian, and the grouped multinomial regression; see and . There are two new and important additions. The family argument can be a GLM family object, which opens the door to any programmed family (). This comes with a modest computational cost, so when the built-in families suffice, they should be used instead. The other novelty is the relax option, which refits each of the active sets in the path unpenalized. The algorithm uses cyclical coordinate descent in a path-wise fashion, as described in the papers cited. 
License: GPL-2 VignetteBuilder: knitr Encoding: UTF-8 URL: https://glmnet.stanford.edu RoxygenNote: 7.2.3 LinkingTo: RcppEigen, Rcpp NeedsCompilation: yes Packaged: 2023-08-20 00:30:42 UTC; hastie Author: Jerome Friedman [aut], Trevor Hastie [aut, cre], Rob Tibshirani [aut], Balasubramanian Narasimhan [aut], Kenneth Tay [aut], Noah Simon [aut], Junyang Qian [ctb], James Yang [aut] Maintainer: Trevor Hastie Repository: CRAN Date/Publication: 2023-08-22 03:10:09 UTC glmnet/build/0000755000176200001440000000000014470257255012652 5ustar liggesusersglmnet/build/vignette.rds0000644000176200001440000000046514470257255015216 0ustar liggesusersRJ0:mAI$_胔=5,&#I.6@{:!$ sܦf"|b #include #include "tools/cpp/runfiles/runfiles.h" // must be quotes! #include "bazel_data_util.hpp" #include namespace glmnetpp { Eigen::MatrixXd load_csv_direct(const std::string& path) { using namespace Eigen; std::ifstream indata; indata.open(path); std::string line; std::vector values; size_t rows = 0; while (std::getline(indata, line)) { std::stringstream lineStream(line); std::string cell; while (std::getline(lineStream, cell, ',')) { values.push_back(std::stod(cell)); } ++rows; } return Map>( values.data(), rows, (rows == 0) ? 0 : values.size()/rows); } // Loads csv by modifying the path to be bazel's path. Eigen::MatrixXd load_csv(const std::string& argv0, const std::string& path) { using bazel::tools::cpp::runfiles::Runfiles; std::string error; std::unique_ptr runfiles(Runfiles::Create(argv0, &error)); // Important: // If this is a test, use Runfiles::CreateForTest(&error). // Otherwise, if you don't have the value for argv[0] for whatever // reason, then use Runfiles::Create(&error). 
if (runfiles == nullptr) { throw std::runtime_error(error); } std::string new_path = runfiles->Rlocation("__main__/" + path); return load_csv_direct(new_path); } } // namespace ghostbasil glmnet/src/glmnetpp/bug_reports/tools/bazel_data_util.hpp0000644000176200001440000000035514341220705023505 0ustar liggesusers#pragma once #include #include namespace glmnetpp { Eigen::MatrixXd load_csv_direct(const std::string& path); Eigen::MatrixXd load_csv(const std::string& argv0, const std::string& path); } // namespace glmnetpp glmnet/src/glmnetpp/bug_reports/data/0000755000176200001440000000000014341220705017417 5ustar liggesusersglmnet/src/glmnetpp/bug_reports/data/BUILD.bazel0000644000176200001440000000015014341220705021271 0ustar liggesusersfilegroup( name = "data", srcs = glob(["**/*.csv"]), visibility = ["//visibility:public"], )glmnet/src/glmnetpp/bug_reports/data/2022_11_07/0000755000176200001440000000000014470257261020625 5ustar liggesusersglmnet/src/glmnetpp/bug_reports/data/2022_11_07/generate_data.R0000644000176200001440000000051514341220705023522 0ustar liggesusersload(url("https://github.com/DylanDijk/RepoA/blob/main/reprod_features.rda?raw=true")) load(url("https://github.com/DylanDijk/RepoA/blob/main/reprod_response.rda?raw=true")) X = reprod_features y = reprod_response write.table(X, "X.csv", sep=',', row.names=F, col.names=F) write.table(y, "y.csv", sep=',', row.names=F, col.names=F)glmnet/src/glmnetpp/bug_reports/2022_11_07.cpp0000644000176200001440000000262614341220705020414 0ustar liggesusers#include #include "tools/bazel_data_util.hpp" #include int main(int argc, char** argv) { using namespace glmnetpp; std::string data_path = "bug_reports/data/2022_11_07/"; Eigen::MatrixXd X = load_csv(argv[0], data_path + "X.csv"); Eigen::MatrixXd y = load_csv(argv[0], data_path + "y.csv"); size_t n = X.rows(); size_t p = X.cols(); size_t nc = y.cols(); double alpha = 1.0; Eigen::VectorXi jd(1); jd.setZero(); Eigen::MatrixXd offset(n, nc); offset.setZero(); 
Eigen::VectorXd vp(p); vp.setOnes(); Eigen::MatrixXd cl(2, p); cl.row(0).array() = -9.9e35; cl.row(1).array() = 9.9e35; int ne = 129; int nx = 128; int nlam = 100; double flmin = 1e-4; Eigen::VectorXd ulam(1); ulam[0] = 0; double thr = 1e-7; bool isd = true; bool intr = true; int maxit = 1e5; int kopt = 0; int lmu; Eigen::MatrixXd a0(nc, nlam); a0.setZero(); Eigen::VectorXd ca(nx * nlam * nc); Eigen::VectorXi ia(nx); Eigen::VectorXi nin(nlam); double dev0; Eigen::VectorXd dev(nlam); Eigen::VectorXd alm(nlam); int nlp; int jerr; transl::lognet( alpha, X, y, offset, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, kopt, lmu, a0, ca, ia, nin, dev0, dev, alm, nlp, jerr ); return 0; } glmnet/src/glmnetpp/bug_reports/BUILD.bazel0000644000176200001440000000103714341220705020365 0ustar liggesuserscc_library( name = "bug_reports_tools", srcs = glob([ "tools/**/*.hpp", "tools/**/*.cpp", ]), deps = [ "@bazel_tools//tools/cpp/runfiles", "@eigen", ] ) [cc_binary( name = type_, srcs = glob([ "**/*.hpp", "{}.cpp".format(type_), ]), defines = ["EIGEN_INITIALIZE_MATRICES_BY_NAN"], data = ["//bug_reports/data:data"], deps = [ ":bug_reports_tools", "//test:testutil", "//:glmnetpp", ], ) for type_ in [ "2022_11_07", ]]glmnet/src/glmnetpp/CMakeLists.txt0000644000176200001440000000777514171551160016736 0ustar liggesuserscmake_minimum_required(VERSION 3.7) project("glmnetpp" VERSION 4.2.0 DESCRIPTION "A C++ implementation of elastic net solver.") option(GLMNETPP_ENABLE_TEST "Enable unit tests to be built." ON) option(GLMNETPP_ENABLE_BENCHMARK "Enable benchmarks to be built." OFF) option(GLMNETPP_ENABLE_COVERAGE "Build glmnetpp with coverage" OFF) option(GLMNETPP_MOCK_LEGACY "Build glmnetpp with mock versions of the fortran." 
OFF) # Define a set of strict build flags set(GLMNETPP_STRICT_WARNINGS -g -Wall) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set(GLMNETPP_STRICT_WARNINGS ${GLMNETPP_STRICT_WARNINGS} -Wextra -Werror -pedantic) endif() # Stoopid hack on windows if (WIN32) SET(CMAKE_FIND_LIBRARY_PREFIXES "") SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".dll") endif() # Dependency on Eigen find_package(Eigen3 3.3 CONFIG REQUIRED NO_MODULE) message(STATUS "Eigen3 found at ${EIGEN3_INCLUDE_DIR}") # Set include dirs set(GLMNETPP_INCLUDEDIR "${${PROJECT_NAME}_SOURCE_DIR}/include") set(GLMNETPP_SOURCEDIR "${${PROJECT_NAME}_SOURCE_DIR}/src") # Add this library as shared library. add_library(${PROJECT_NAME} INTERFACE) target_include_directories(${PROJECT_NAME} SYSTEM INTERFACE $ $) # Set C++14 standard for project target target_compile_features(${PROJECT_NAME} INTERFACE cxx_std_14) # Set install destinations install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}_Targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) # Create GlmnetppConfigVersion.cmake which contains current project version # This is supposed to help with (major) version compatibility. 
include(CMakePackageConfigHelpers) write_basic_package_version_file("${PROJECT_NAME}ConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY SameMajorVersion) configure_package_config_file( "${PROJECT_SOURCE_DIR}/cmake/${PROJECT_NAME}Config.cmake.in" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(EXPORT ${PROJECT_NAME}_Targets FILE ${PROJECT_NAME}Targets.cmake NAMESPACE ${PROJECT_NAME}:: DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(DIRECTORY ${PROJECT_SOURCE_DIR}/include DESTINATION ${CMAKE_INSTALL_PREFIX}) # Build source library if (GLMNETPP_ENABLE_TEST OR GLMNETPP_ENABLE_BENCHMARK) if (NOT GLMNETPP_MOCK_LEGACY) # MUST be called at the top-level CMakeLists.txt enable_language(Fortran) endif() endif() add_subdirectory(${PROJECT_SOURCE_DIR}/src) # Automate the choosing of config # if CMAKE_BUILD_TYPE not defined if (NOT CMAKE_BUILD_TYPE) # if binary directory ends with "release", use release mode if (${PROJECT_BINARY_DIR} MATCHES "release$") set(CMAKE_BUILD_TYPE RELEASE) # otherwise, use debug mode else() set(CMAKE_BUILD_TYPE DEBUG) endif() endif() message(STATUS "Compiling in ${CMAKE_BUILD_TYPE} mode") if (GLMNETPP_ENABLE_TEST) # This will perform memcheck include(CTest) enable_testing() # Find googletest find_package(GTest 1.11 CONFIG REQUIRED) # add test subdirectory add_subdirectory(${PROJECT_SOURCE_DIR}/test ${PROJECT_BINARY_DIR}/test) endif() if (GLMNETPP_ENABLE_BENCHMARK) find_package(benchmark REQUIRED HINTS ${CMAKE_CURRENT_SOURCE_DIR}/../../benchmark/build) # add benchmark subdirectory add_subdirectory(${PROJECT_SOURCE_DIR}/benchmark ${PROJECT_BINARY_DIR}/benchmark) endif() 
glmnet/src/glmnetpp/README.md0000644000176200001440000000251214341220705015432 0ustar liggesusers# glmnetpp C++ Core Engine ## Overview The `glmnetpp` C++ core engine implements the core components of the system. ## Dependencies If you have set up a conda environment following the instructions in the main repo README, you should already have a C++ toolchain installed along with Bazel. If not, we require a C++ toolchain that supports C++-14 and [OpenMP](https://www.openmp.org/) and an installation of [Bazel](https://bazel.build/) Suggested compilers: - [GCC >= 9.3.0](https://gcc.gnu.org/) - [Clang >= 10.0.0](https://clang.llvm.org/) ## Build ``` mamba update -y conda mamba env create conda activate glmnetpp ``` __Note__: On Linux, it's best to specify whether you want to use `clang` or `gcc`. Add the appropriate flag to each `bazel` call below: ``` # For gcc # For clang bazel ... --config=gcc bazel ... --config=clang ``` __To build `glmnetpp`__: ``` bazel build //:glmnetpp ``` Note that `glmnetpp` is a header-only library, so this will simply collect all the headers and register its dependencies. For release mode, add the flag `-c opt` after `build`. For debug mode, add the flag `-c dbg` after `build`. __To run all tests__: ``` bazel test -c dbg //test/... 
``` __To run a particular test__: ``` bazel test -c dbg //test:name-of-test ``` __To run the benchmarks__: ``` bazel run -c opt //benchmark:name-of-benchmark ```glmnet/src/glmnetpp/include/0000755000176200001440000000000014171551160015601 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/0000755000176200001440000000000014171551160020450 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_driver.hpp0000644000176200001440000000041514171551160023643 0ustar liggesusers#pragma once #include #include #include #include #include glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/0000755000176200001440000000000014471023421022765 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_cov.hpp0000644000176200001440000000123414140040573026661 0ustar liggesusers#pragma once #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointInternalPolicy> : ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_multi.hpp0000644000176200001440000000142414171551160027230 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::gaussian, util::mode_type::multi, ElnetPointInternalPolicy> : ElnetPoint< util::glm_type::gaussian, util::mode_type::multi, ElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::multi, ElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/binomial_multi_class.hpp0000644000176200001440000000374214171551160027700 0ustar liggesusers#pragma once #include #include namespace glmnetpp { 
template struct ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointInternalPolicy> : ElnetPointBinomialBase< ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointBinomialBase< ElnetPoint::multi_class, ElnetPointInternalPolicy> >; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using typename base_t::update_t; public: using base_t::base_t; template GLMNETPP_STRONG_INLINE void irls(const PointConfigPack& pack) { this->initialize(pack); while (1) { this->reset_converged(); if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } try { std::for_each( this->class_begin(), this->class_end(), [&, this](auto ic) { state_t state = this->setup_wls(ic); if (state == state_t::continue_) return; base_t::wls(pack); this->update_irls_class(); }); } // catch so that we can still finish some of the IRLS update catch (const util::max_active_reached_error& e) {} state_t state = this->update_irls(pack); if (state == state_t::break_) break; } } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/binomial_two_class.hpp0000644000176200001440000000171714171551160027357 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::binomial, util::mode_type::two_class, ElnetPointInternalPolicy> : ElnetPointBinomialBase< ElnetPoint< util::glm_type::binomial, util::mode_type::two_class, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointBinomialBase< ElnetPoint< util::glm_type::binomial, util::mode_type::two_class, ElnetPointInternalPolicy> >; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using typename base_t::update_t; public: using base_t::base_t; }; } // namespace glmnetpp 
glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_base.hpp0000644000176200001440000000345114171551160026310 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointGaussianBase : ElnetPointCRTPBase { private: using base_t = ElnetPointCRTPBase; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::update_t; using typename base_t::state_t; using base_t::self; public: using base_t::base_t; template void fit(const PointConfigPack& pack) { this->initialize(pack); if (this->is_warm_ever()) { self().partial_fit(pack); } while (1) { bool converged_kkt = this->initial_fit( [&]() { return base_t::template fit(pack); } ); if (converged_kkt) return; self().partial_fit(pack); } } protected: template GLMNETPP_STRONG_INLINE void partial_fit(const PointPackType& pack) { this->set_warm_ever(); // fit on partial subset while (1) { bool converged = false, _ = false; std::tie(converged, _) = base_t::template fit(pack); if (converged) break; } } template GLMNETPP_STRONG_INLINE state_t update(index_t k, const PointPackType& pack, DiffType&& diff) { state_t state = base_t::template update(k, pack, diff); if (state == state_t::continue_) return state_t::continue_; this->update_rsq(k, diff); return state_t::noop_; } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_wls.hpp0000644000176200001440000000335714171551160026210 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::gaussian, util::mode_type::wls, ElnetPointInternalPolicy> : ElnetPointGaussianBase< ElnetPoint< util::glm_type::gaussian, util::mode_type::wls, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointGaussianBase< ElnetPoint::wls, ElnetPointInternalPolicy> >; using typename base_t::update_t; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; // hide some unnecessary 
parts of the base class using base_t::fit; // dummy struct to stay consistent with interface. struct PointConfigPack {}; public: using base_t::base_t; template GLMNETPP_STRONG_INLINE void fit(IntType m, IntType& jerr) { try { base_t::fit(PointConfigPack()); } catch (const util::elnet_error& e) { jerr = e.err_code(m-1); // m is 1-indexed } } template GLMNETPP_STRONG_INLINE void update(index_t k, const PointPackType& pack) { value_t beta_diff = 0; auto state = base_t::template update(k, pack, beta_diff); if (state == state_t::continue_) return; this->update_resid(k, beta_diff); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_naive.hpp0000644000176200001440000000247014171551160026500 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, ElnetPointInternalPolicy> : ElnetPointGaussianBase< ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointGaussianBase< ElnetPoint::naive, ElnetPointInternalPolicy> >; using typename base_t::update_t; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; public: using base_t::base_t; template GLMNETPP_STRONG_INLINE void update(index_t k, const PointPackType& pack) { value_t beta_diff = 0; auto state = base_t::template update(k, pack, beta_diff); if (state == state_t::continue_) return; this->update_resid(k, beta_diff); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_multi.hpp0000644000176200001440000000245514171551160026533 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::gaussian, util::mode_type::multi, ElnetPointInternalPolicy> : ElnetPointGaussianBase< ElnetPoint< util::glm_type::gaussian, util::mode_type::multi, ElnetPointInternalPolicy> > { private: using 
base_t = ElnetPointGaussianBase< ElnetPoint::multi, ElnetPointInternalPolicy> >; using typename base_t::update_t; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; public: using base_t::base_t; template GLMNETPP_STRONG_INLINE void update(index_t k, const PointPackType& pack) { auto&& del = this->beta_buffer(); auto state = base_t::template update(k, pack, del); if (state == state_t::continue_) return; this->update_resid(k, del); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/0000755000176200001440000000000014341220705024601 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_cov.hpp0000644000176200001440000000571714157246224030520 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct SpElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template SpElnetPointInternal( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, const WType& w, const XType& X, const XMType& xm, const XSType& xs, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, g, xv, vp, cl, ju) , w_(w.data(), w.size()) , X_(X.rows(), X.cols(), X.nonZeros(), X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(), X.innerNonZeroPtr()) , xm_(xm.data(), xm.size()) , xs_(xs.data(), xs.size()) {} GLMNETPP_STRONG_INLINE void update_active(index_t k) { base_t::update_active(k, [&](index_t j, index_t l) { return compute_sp_cov( X_.col(j), X_.col(k), w_, xm_(j), xm_(k), xs_(j), xs_(k) ); }); } private: /* * Computes weighted covariance between two features. 
* It is assumed that both features are weighted by w * and that the mean and standard deviation are weighted by w. */ template GLMNETPP_STRONG_INLINE static auto compute_sp_cov( const X1Type& x1, const X2Type& x2, const WType& w, value_t xm1, value_t xm2, value_t xs1, value_t xs2) { auto wx2 = x2.cwiseProduct(w); return (x1.dot(wx2) - xm1 * xm2) / (xs1 * xs2); } using typename base_t::vec_t; using spmat_t = Eigen::SparseMatrix; Eigen::Map w_; // weights for each column of X_ Eigen::Map X_; // data matrix (sparse) Eigen::Map xm_; // col-wise mean Eigen::Map xs_; // col-wise stddev // (may not be actually the stddev of X, but something passed by user) }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_multi.hpp0000644000176200001440000000760414171551160031052 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct SpElnetPointInternal< util::glm_type::gaussian, util::mode_type::multi, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianMultiBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianMultiBase< ValueType, IndexType, BoolType>; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template SpElnetPointInternal( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, value_t ys0, YType& y, const XType& X, const WType& w, const XMType& xm, const XSType& xs, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(thr, maxit, y.cols(), nx, nlp, ia, ys0, xv, vp, cl, ju, int_param) , o_(y.cols()) , X_(X.rows(), X.cols(), X.nonZeros(), X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(), X.innerNonZeroPtr()) , y_(y.data(), y.rows(), y.cols()) , w_(w.data(), w.size()) , xm_(xm.data(), xm.size()) , xs_(xs.data(), xm.size()) { o_.setZero(); base_t::construct([&](index_t 
k, auto& g) { return compute_abs_grad(k, g); }); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { base_t::update_beta(k, pack.l1_regul(), pack.l2_regul(), [&](index_t j, auto& g) { return compute_grad(j, g); }); } template GLMNETPP_STRONG_INLINE void update_resid(index_t k, const DiffType& diff) { for (index_t j = 0; j < y_.cols(); ++j) { gaussian_naive_t::update_resid(y_.col(j), diff(j)/xs_(k), X_.col(k)); } o_ += (xm_(k) / xs_(k)) * diff; } template GLMNETPP_STRONG_INLINE bool check_kkt(const PointPackType& pack) { return base_t::check_kkt(pack.l1_regul(), [&](index_t k, auto& g) { return compute_abs_grad(k, g); }); } private: template GLMNETPP_STRONG_INLINE void compute_grad(index_t k, GType&& g) const { for (index_t j = 0; j < y_.cols(); ++j) { // TODO: seems similar to other sparse ones... g(j) = X_.col(k).cwiseProduct(w_).dot( (y_.col(j).array() + o_(j)).matrix()) / xs_(k); } } template GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t k, GType&& g) const { compute_grad(k, g); return g.norm(); } using typename base_t::vec_t; using typename base_t::mat_t; using typename base_t::sp_mat_t; vec_t o_; // factor of w_ that is missing from the true residual Eigen::Map X_; // data matrix Eigen::Map y_; // residual uncentered Eigen::Map w_; // weights for each datapt Eigen::Map xm_; // mean of each column of X Eigen::Map xs_; // stddev of each column of X }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/binomial_multi_class.hpp0000644000176200001440000001127314171551160031512 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Dense elastic-net point solver for Binomial multi-class method. 
*/ template struct ElnetPointInternal< util::glm_type::binomial, util::mode_type::multi_class, ValueType, IndexType, BoolType> : ElnetPointInternalBinomialMultiClassBase { private: using base_t = ElnetPointInternalBinomialMultiClassBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; using typename base_t::state_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternal( bool isd, bool intr, index_t kopt, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, value_t& dev0, const XType& X, const YType& y, const WType& w, const VPType& vp, const CLType& cl, const JUType& ju, ISType& is, const IntParamType& int_param) : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, g, dev0, y, w, vp, cl, ju, is, int_param) , X_(X.data(), X.rows(), X.cols()) { base_t::construct( [&](index_t j) { return compute_xv(j, this->weight()); }, [&](index_t ic) { base_t::initialize_resid(ic); }, [&](index_t j) { return compute_grad(j); }); } GLMNETPP_STRONG_INLINE void update_intercept() { base_t::update_intercept(this->resid().sum()); } GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) { gaussian_naive_t::update_resid( this->resid(), beta_diff, (this->new_weight().array() * X_.col(k).array()).matrix()); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { base_t::update_beta(k, compute_grad(k), pack.l1_regul(), pack.l2_regul()); } GLMNETPP_STRONG_INLINE state_t setup_wls(index_t ic) { state_t state = base_t::setup_wls(ic); if (state != state_t::noop_) return state; if (!this->optimization_type()) { auto& xv_ic = this->curr_x_var(); base_t::for_each_with_skip(this->all_begin(), this->all_end(), [&](index_t j) { xv_ic(j) = this->compute_xv(j, this->new_weight()); }, [&](index_t j) { return this->is_excluded(j); }); } return state_t::noop_; } template GLMNETPP_STRONG_INLINE state_t 
update_irls(const PointConfigPack& pack) { return base_t::update_irls(pack.elastic_prop(), pack.l1_regul(), [&](index_t l, value_t s, auto& buff) { buff -= s * X_.col(l); }, [&](auto& buff) { buff.array() = buff.array().exp(); }, [&](index_t ic) { base_t::initialize_resid(ic); }, [&](index_t k) { return this->compute_grad(k); }); } GLMNETPP_STRONG_INLINE void update_irls_class() { base_t::has_converged_irls_class(); base_t::update_irls_class( [&](auto& buff) { std::for_each(this->active_begin(), this->active_end(), [&](index_t k) { buff += this->beta(k) * X_.col(k); }); }); } private: using typename base_t::mat_t; template GLMNETPP_STRONG_INLINE value_t compute_xv(index_t j, const WType& w) const { return w.dot(X_.col(j).array().square().matrix()); } GLMNETPP_STRONG_INLINE value_t compute_grad(index_t k) const { return base_t::compute_grad(this->resid(), X_.col(k)); } Eigen::Map X_; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/binomial_two_class.hpp0000644000176200001440000001102214171551160031161 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Dense elastic-net point solver for Binomial two-class method. 
*/ template struct ElnetPointInternal< util::glm_type::binomial, util::mode_type::two_class, ValueType, IndexType, BoolType> : ElnetPointInternalBinomialTwoClassBase { private: using base_t = ElnetPointInternalBinomialTwoClassBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; using typename base_t::state_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternal( bool isd, bool intr, index_t kopt, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const GType& g, value_t& dev0, const XType& X, const YType& y, const WType& w, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, g, dev0, y, w, vp, cl, ju, int_param) , X_(X.data(), X.rows(), X.cols()) { this->construct( [&](index_t j) { return compute_xv(X_.col(j), this->weight()); }, [&](index_t j) { return this->compute_grad(j); }); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { auto gk = compute_grad(k); base_t::update_beta(k, gk, pack.l1_regul(), pack.l2_regul()); } GLMNETPP_STRONG_INLINE void update_intercept() { base_t::update_intercept(this->resid().sum()); } GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) { gaussian_naive_t::update_resid( this->resid(), beta_diff, (this->new_weight().array() * X_.col(k).array()).matrix()); } template GLMNETPP_STRONG_INLINE void setup_wls(const PointPackType&) { base_t::setup_wls(); const auto& ixx = this->strong_map(); auto& xv = this->x_var(); const auto& v = this->new_weight(); if (!this->optimization_type()) { base_t::for_each_with_skip(this->all_begin(), this->all_end(), [&](index_t j) { xv(j) = compute_xv(X_.col(j), v); }, [&](index_t j) { return !ixx[j]; }); } } template GLMNETPP_STRONG_INLINE state_t update_irls(const PointConfigPack& pack) { auto predict_f = [&](index_t i) { auto fi = 
this->intercept() + this->offset()(i); std::for_each(this->active_begin(), this->active_end(), [&](auto k) { fi += this->beta(k) * X_(i,k); }); return fi; }; state_t state = base_t::update_irls_invariants(predict_f); if (state == state_t::break_) return state_t::break_; auto grad_f = [&](index_t k) { return this->compute_grad(k); }; return base_t::update_irls_strong_set(grad_f, pack.l1_regul()); } private: using typename base_t::mat_t; template GLMNETPP_STRONG_INLINE static value_t compute_xv(const XType& x, const WType& w) { return w.dot(x.array().square().matrix()); } GLMNETPP_STRONG_INLINE value_t compute_grad(index_t j) const { return this->resid().dot(X_.col(j)); } Eigen::Map X_; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_base.hpp0000644000176200001440000007520314171551160030130 0ustar liggesusers#pragma once #include #include #include #include namespace glmnetpp { /* * Base class for internal implementation of Gaussian elastic-net point solver. 
* This contains all the common interface and members across all versions of gaussian: * - dense gaussian cov * - dense gaussian naive * - sparse gaussian cov * - sparse gaussian naive */ template struct ElnetPointInternalGaussianBase : ElnetPointInternalBase { private: using base_t = ElnetPointInternalBase; protected: using typename base_t::vec_t; using typename base_t::ivec_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternalGaussianBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju, value_t rsq = 0.0) : base_t(thr, maxit, nx, nlp, ia, vp, cl, ju) , rsq_(rsq) , xv_(xv.data(), xv.size()) {} GLMNETPP_STRONG_INLINE bool is_warm_ever() const { return iz_; } GLMNETPP_STRONG_INLINE void set_warm_ever() { iz_ = true; } GLMNETPP_STRONG_INLINE void update_dlx(index_t k, value_t beta_diff) { base_t::update_dlx(beta_diff, xv_(k)); } GLMNETPP_STRONG_INLINE constexpr void update_intercept() const {} GLMNETPP_STRONG_INLINE auto rsq() const { return rsq_; } GLMNETPP_STRONG_INLINE auto rsq_prev() const { return rsq_prev_; } /* Static interface */ GLMNETPP_STRONG_INLINE static void update_rsq(value_t& rsq, value_t beta_diff, value_t gk, value_t x_var) { rsq += beta_diff * (2.0 * gk - beta_diff * x_var); } protected: using base_t::update_dlx; using base_t::update_intercept; GLMNETPP_STRONG_INLINE auto& rsq() { return rsq_; } GLMNETPP_STRONG_INLINE void initialize() { rsq_prev_ = rsq_; } GLMNETPP_STRONG_INLINE void update_rsq(index_t k, value_t beta_diff, value_t gk) { update_rsq(rsq_, beta_diff, gk, xv_(k)); } GLMNETPP_STRONG_INLINE auto x_var(index_t i) const { return xv_[i]; } private: // internal non-captures bool iz_ = false; // true if a partial fit was done with a previous lambda (warm ever) value_t rsq_; // R^2 value_t rsq_prev_ = 0.0; // previous R^2 // captures Eigen::Map xv_; // variance of columns of 
x }; /* * Base class for internal implementation of Gaussian univariate-response methods. */ template struct ElnetPointInternalGaussianUniBase : ElnetPointInternalGaussianBase { private: using base_t = ElnetPointInternalGaussianBase; protected: using typename base_t::vec_t; using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternalGaussianUniBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , a_(xv.size()) { a_.setZero(); } GLMNETPP_STRONG_INLINE void update_beta(index_t k, value_t ab, value_t dem, value_t gk) { const auto& cl = this->endpts(); base_t::update_beta( a_(k), gk, this->x_var(k), this->penalty()(k), cl(0,k), cl(1,k), ab, dem); } public: GLMNETPP_STRONG_INLINE auto beta(index_t k) const { return a_(k); } private: vec_t a_; // uncompressed beta }; /* * Base class for internal implementation of Gaussian covariance method. 
* This contains all the common interface and members for gaussian covariance methods: * - dense gaussian cov * - sparse gaussian cov */ template struct ElnetPointInternalGaussianCovBase : ElnetPointInternalGaussianUniBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianUniBase< ValueType, IndexType, BoolType>; protected: using typename base_t::vec_t; using typename base_t::mat_t; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternalGaussianCovBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , da_(g.size()) , g_(g.data(), g.size()) , c_(g.size(), nx) {} GLMNETPP_STRONG_INLINE bool is_excluded(index_t j) const { return !this->exclusion()[j]; } template GLMNETPP_STRONG_INLINE void initialize(const PointPackType&) { base_t::initialize(); } template GLMNETPP_STRONG_INLINE constexpr bool initial_fit(InitialFitIntType f) const { bool converged = false, kkt_passed = false; std::tie(converged, kkt_passed) = f(); return kkt_passed; } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { base_t::update_beta(k, pack.ab, pack.dem, g_(k)); } GLMNETPP_STRONG_INLINE void compress_active() { index_t j = 0; std::for_each(this->active_begin(), this->active_end(), [&](auto k) { da_(j++) = this->beta(k); }); } GLMNETPP_STRONG_INLINE void update_compressed_active() { index_t j = 0; std::for_each(this->active_begin(), this->active_end(), [&](auto k) { da_(j++) -= this->beta(k); }); } GLMNETPP_STRONG_INLINE void update_grad_compressed_active() { base_t::for_each_with_skip(this->all_begin(), this->all_end(), [&](auto j) { auto n_act = this->n_active(); g_(j) += da_.head(n_act).dot(c_.row(j).head(n_act)); }, [&](auto j) { return this->is_active(j) || this->is_excluded(j); }); } GLMNETPP_STRONG_INLINE void 
update_rsq(index_t k, value_t beta_diff) { base_t::update_rsq(k, beta_diff, g_(k)); } GLMNETPP_STRONG_INLINE void update_grad(index_t j, index_t k, value_t beta_diff) { g_(j) -= c_(j, this->active_idx(k)) * beta_diff; } template GLMNETPP_STRONG_INLINE constexpr bool check_kkt(const PointPackType&) const { return true; } protected: template GLMNETPP_STRONG_INLINE void update_active(index_t k, XTXFType xtx_f) { base_t::update_active(k); for (auto it = this->all_begin(); it != this->all_end(); ++it) { auto j = *it; if (this->is_excluded(j)) continue; // Note: j == k case is after the is_active(j) check in legacy code. // Since base_t::update_active adds mm_(k), // it will now be considered active, // so we have to first check this case. if (j == k) { c_(j, this->n_active()-1) = this->x_var(j); continue; } if (this->is_active(j)) { c_(j, this->n_active()-1) = c_(k, this->active_idx(j)); continue; } c_(j, this->n_active()-1) = xtx_f(j, k); } } private: vec_t da_; // compressed beta Eigen::Map g_; // gradient (not absolute gradient) mat_t c_; // X^TX cache but of dimension (nvars, max_nvars) }; /* * Base class for internal implementation of Gaussian naive method. 
* This contains all the common interface and members for gaussian naive methods: * - dense gaussian naive * - sparse gaussian naive */ template struct ElnetPointInternalGaussianNaiveBase : ElnetPointInternalGaussianUniBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianUniBase< ValueType, IndexType, BoolType>; protected: using typename base_t::vec_t; using typename base_t::mat_t; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternalGaussianNaiveBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , g_(ju.size()) , ix_(ju.size(), false) { g_.setZero(); } using base_t::update_intercept; GLMNETPP_STRONG_INLINE bool is_excluded(index_t j) const { return !ix_[j]; } template GLMNETPP_STRONG_INLINE constexpr bool initial_fit(InitialFitIntType f) const { return initial_fit([&]() { return this->has_reached_max_passes(); }, f); } template GLMNETPP_STRONG_INLINE void initialize(const PointPackType& pack) { base_t::initialize(); initialize_strong_set(pack); } GLMNETPP_STRONG_INLINE void update_rsq(index_t k, value_t beta_diff) { base_t::update_rsq(k, beta_diff, gk_cache_); } template GLMNETPP_STRONG_INLINE bool check_kkt(value_t ab, AbsGradFType abs_grad_f) { auto skip_f = [&](auto k) { return !is_excluded(k) || !this->exclusion()[k]; }; update_abs_grad(g_, abs_grad_f, skip_f); return check_kkt(g_, this->penalty(), ix_, ab, skip_f); } const auto& abs_grad() const { return g_; } /* Static interface */ template GLMNETPP_STRONG_INLINE static void update_resid( RType&& r, value_t beta_diff, const XType& x) { r -= beta_diff * x; } template GLMNETPP_STRONG_INLINE static value_t update_intercept( value_t& intercept, RType&& r, value_t& dlx, bool intr, value_t r_sum, value_t var, const VType& v) { auto d = base_t::update_intercept(intercept, dlx, intr, r_sum, 
var); if (d) update_resid(r, d, v); return d; } template GLMNETPP_STRONG_INLINE constexpr static bool initial_fit( HasReachedMaxPassesType has_reached_max_passes, InitialFitIntType f) { // Keep doing initial fit until either doesn't converge or // converged and kkt passed. while (1) { if (has_reached_max_passes()) { throw util::maxit_reached_error(); } bool converged = false, kkt_passed = false; std::tie(converged, kkt_passed) = f(); if (!converged) break; if (kkt_passed) return true; } return false; } /* * Updates absolute gradient abs_grad by iterating through each element * and assigning compute_grad_f(k). Iteration skips over k whenever skip_f(k) is true. */ template GLMNETPP_STRONG_INLINE static void update_abs_grad( AbsGradType&& abs_grad, ComputeAbsGradFType compute_abs_grad_f, SkipFType skip_f) { base_t::for_each_with_skip( util::counting_iterator(0), util::counting_iterator(abs_grad.size()), [&](index_t j) { abs_grad(j) = compute_abs_grad_f(j); }, skip_f); } /* * Checks KKT condition and computes strong map. See base_t::compute_strong_map; * Returns true if no update occured (KKT all passed). 
*/ template GLMNETPP_STRONG_INLINE static bool check_kkt( AbsGradType&& abs_grad, const PenaltyType& penalty, StrongMapType&& strong_map, value_t l1_regul, SkipFType skip_f) { return !base_t::compute_strong_map(abs_grad, penalty, strong_map, l1_regul, skip_f); } template GLMNETPP_STRONG_INLINE static bool check_kkt( AbsGradType&& abs_grad, const PenaltyType& penalty, StrongMapType&& strong_map, value_t l1_regul, FType f, SkipFType skip_f) { return !base_t::compute_strong_map(abs_grad, penalty, strong_map, l1_regul, f, skip_f); } protected: GLMNETPP_STRONG_INLINE void update_beta(index_t k, value_t ab, value_t dem, value_t gk) { gk_cache_ = gk; base_t::update_beta(k, ab, dem, gk_cache_); } template GLMNETPP_STRONG_INLINE void initialize_strong_set(const PointPackType& pack) { base_t::compute_strong_map( g_, this->penalty(), ix_, pack.elastic_prop(), pack.lmda(), pack.prev_lmda(), [&](auto k) { return !is_excluded(k) || !this->exclusion()[k]; } ); } template GLMNETPP_STRONG_INLINE void construct(AbsGradFType abs_grad_f) { update_abs_grad(g_, abs_grad_f, [&](auto j) { return !this->exclusion()[j]; }); } private: value_t gk_cache_ = 0.0; // caches gradient at k when updating beta vec_t g_; // cwise-absolute gradient std::vector ix_; // strong set indicators }; /* * Base class for internal implementation of multi-response Gaussian elastic-net point solver. 
* This contains all the common interface and members across all versions of multi-response gaussian: * - dense gaussian multi * - sparse gaussian multi */ template struct ElnetPointInternalGaussianMultiBase : ElnetPointInternalGaussianBase { private: using base_t = ElnetPointInternalGaussianBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase; // hide base protected members using base_t::initialize; protected: using typename base_t::vec_t; using typename base_t::mat_t; using typename base_t::ivec_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternalGaussianMultiBase( value_t thr, index_t maxit, index_t nr, index_t nx, index_t& nlp, IAType& ia, value_t ys0, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(thr * ys0 / nr, maxit, nx, nlp, ia, xv, vp, cl /* actually won't be used */, ju, ys0) , bnorm_thr_(int_param.bnorm_thr) , bnorm_mxit_(int_param.bnorm_mxit) , ys0_(ys0) , nr_(nr) , a_(nr, xv.size()) , g_curr_(nr) , g_next_(nr) , b_diff_(nr) , g_(xv.size()) , ix_(xv.size(), false) , isc_(nr) , cl_(cl.data(), cl.size()) { a_.setZero(); } GLMNETPP_STRONG_INLINE auto& beta_buffer() { return b_diff_; } GLMNETPP_STRONG_INLINE const auto& abs_grad() const { return g_; } GLMNETPP_STRONG_INLINE bool is_excluded(index_t k) const { return !ix_[k]; } GLMNETPP_STRONG_INLINE auto beta(index_t k) const { return a_.col(k); } GLMNETPP_STRONG_INLINE auto beta(index_t k, index_t l) const { return a_(k, l); } constexpr GLMNETPP_STRONG_INLINE void update_intercept() const {} template GLMNETPP_STRONG_INLINE void update_dlx(index_t k, const DiffType& diff) { base_t::update_dlx(diff, this->x_var(k)); } /* * Slightly different formula for updating residual. * Our rsq starts at ys0_ and decrements down. * Later the rsq that we eventually save will be 1-rsq_/ys0_. 
*/ template void update_rsq(index_t k, const DiffType& diff) { this->rsq() -= (diff.array() * (2.0 * g_curr_ - this->x_var(k) * diff).array()).sum(); } template GLMNETPP_STRONG_INLINE constexpr bool initial_fit(InitialFitIntType f) const { return gaussian_naive_t::initial_fit([&]() { return this->has_reached_max_passes(); }, f); } template GLMNETPP_STRONG_INLINE void initialize(const PointPackType& pack) { base_t::initialize(); base_t::compute_strong_map( g_, this->penalty(), ix_, pack.elastic_prop(), pack.lmda(), pack.prev_lmda(), [&](index_t k) { return !is_excluded(k) || !this->exclusion()[k]; }); } template GLMNETPP_STRONG_INLINE void update_beta( index_t k, value_t l1_regul, value_t l2_regul, GradFType grad_f) { Eigen::Map cl_slice( cl_.data() + k * 2 * nr_, 2, nr_); update_beta(k, a_.col(k), this->x_var(k), this->penalty()(k), g_curr_, g_next_, l1_regul, l2_regul, bnorm_thr_, bnorm_mxit_, isc_, cl_slice, grad_f); } /* Static interface */ template GLMNETPP_STRONG_INLINE static void update_beta( index_t k, AType&& ak, value_t xvk, value_t vpk, GCurrType&& g_curr, GNextType&& g_next, value_t l1_regul, value_t l2_regul, value_t bnorm_thr, index_t bnorm_mxit, ISCType&& isc, const CLType& cl, GradFType grad_f) { grad_f(k, g_curr); g_next = g_curr + xvk * ak; auto gkn = g_next.norm(); auto u = 1.0 - l1_regul * vpk / gkn; if (u <= 0.0) { ak.setZero(); } else { ak = (u/(xvk + l2_regul * vpk)) * g_next; chkbnds(g_next, gkn, xvk, cl, l2_regul*vpk, l1_regul*vpk, ak, isc, bnorm_thr, bnorm_mxit); } } protected: template GLMNETPP_STRONG_INLINE void construct(AbsGradFType abs_grad_f) { gaussian_naive_t::update_abs_grad(g_, [&](index_t j) { return abs_grad_f(j, g_curr_); }, [&](auto j) { return !this->exclusion()[j]; }); } template GLMNETPP_STRONG_INLINE bool check_kkt(value_t ab, AbsGradFType abs_grad_f) { auto skip_f = [&](auto k) { return !is_excluded(k) || !this->exclusion()[k]; }; gaussian_naive_t::update_abs_grad(g_, [&](index_t j) { return abs_grad_f(j, g_curr_); }, 
skip_f); return gaussian_naive_t::check_kkt(g_, this->penalty(), ix_, ab, skip_f); } private: /* * TODO: Document what this is doing. * This and the next routine are only needed for multi-response stuff. * Maybe move this in a multi-only static interface? */ template GLMNETPP_STRONG_INLINE static void chkbnds( const GKType& gk, value_t gkn, value_t xv, const CLType& cl, value_t al1, value_t al2, AType& a, ISCType&& isc, value_t thr, index_t mxit) { auto al1p = 1.0 + al1/xv; auto al2p = al2/xv; isc.setZero(); auto gsq = gkn*gkn; auto asq = a.squaredNorm(); double usq = 0.0; double u = 0.0; int kn = -1; while (1) { double vmx = 0.0; for (int k = 0; k < a.size(); ++k) { auto v = std::max(a(k)-cl(1,k), cl(0,k)-a(k)); if (v > vmx) { vmx = v; kn = k; } } if (vmx <= 0.0) break; if (isc(kn)) break; gsq -= gk(kn)*gk(kn); // numerical stability: take abs auto g = std::sqrt(std::abs(gsq))/xv; if (a(kn) < cl(0, kn)) u = cl(0, kn); if (a(kn) > cl(1, kn)) u = cl(1, kn); usq += u*u; double b = 0.0; if (usq == 0.0) b = std::max(0., (g-al2p)/al1p); else { // numerical stability: take abs auto b0 = std::sqrt(std::abs(asq - a(kn) * a(kn))); b = bnorm(b0, al1p, al2p, g, usq, thr, mxit); } asq = usq + b*b; if (asq <= 0.0) { a.setZero(); break; } a(kn) = u; isc(kn) = 1; auto f = 1.0/(xv * (al1p+al2p/std::sqrt(asq))); for (int j = 0; j < a.size(); ++j) { if (!isc(j)) a(j) = f * gk(j); } } } /* * TODO: Document what this is doing. 
*/ GLMNETPP_STRONG_INLINE static value_t bnorm( value_t b0, value_t al1p, value_t al2p, value_t g, value_t usq, value_t thr, index_t mxit) { auto b = b0; auto zsq = b*b + usq; if (zsq <= 0.0) { return 0.0; } auto z = std::sqrt(zsq); auto f = b*(al1p+al2p/z)-g; int it = 0; for (; it < mxit; ++it) { b -= f/(al1p+al2p*usq/(z*zsq)); zsq = b*b + usq; if (zsq <= 0.0) { return 0.0; } z = std::sqrt(zsq); f = b*(al1p+al2p/z)-g; if (std::abs(f) <= thr) break; if (b <= 0.0) { b = 0.0; break; } } if (it >= mxit) throw util::bnorm_maxit_reached_error(); return b; } const value_t bnorm_thr_; const index_t bnorm_mxit_; const value_t ys0_; // frobenius norm of y_ after standardizing const index_t nr_; // number of responses mat_t a_; // matrix of coefficients vec_t g_curr_; // g_curr_(k) = y^T x_col_k vec_t g_next_; // g_next_(k) = y^T x_col_k + beta_diff_k * xv(k) vec_t b_diff_; // temporarily stores beta difference during CD vec_t g_; // absolute gradient std::vector ix_; // strong set ivec_t isc_; // buffer for efficiency during chkbnds Eigen::Map cl_; // override base capture of cl // this class specifically expects cl to be 3-d Array, which we capture as a vector. }; /* * Base class for internal implementation of Gaussian general WLS elastic-net point solver. * This contains all the common interface and members across all versions of the general WLS solver: * - dense gaussian wls * - sparse gaussian wls * This class is intended for the general programmable method. 
*/ template struct ElnetPointInternalGaussianWLSBase : ElnetPointInternalBaseViewer { private: using base_t = ElnetPointInternalBaseViewer; using gaussian_t = ElnetPointInternalGaussianBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::ivec_t; using typename base_t::vec_t; using typename base_t::mat_t; public: template ElnetPointInternalGaussianWLSBase( value_t alm0, value_t almc, value_t alpha, RType& r, XVType& xv, const VType& v, bool intr, const JUType& ju, const VPType& vp, const CLType& cl, index_t nx, value_t thr, index_t maxit, AType& a, value_t& aint, GType& g, IAType& ia, IYType& iy, index_t& iz, MMType& mm, index_t& nino, value_t& rsqc, index_t& nlp) : base_t(thr, maxit, nino, nx, nlp, ia, vp, cl, ju) , lmda_(almc) , prev_lmda_(alm0) , alpha_(alpha) , l1_regul_(almc * alpha) , l2_regul_(almc * (1.0 - alpha)) , xmz_(v.sum()) , intr_(intr) , iz_(iz) , rsq_(rsqc) , r_(r.data(), r.size()) , xv_(xv.data(), xv.size()) , v_(v.data(), v.size()) , a_(a.data(), a.size()) , a0_(aint) , g_(g.data(), g.size()) , ix_(iy.data(), iy.size()) { base_t::construct(mm); } GLMNETPP_STRONG_INLINE bool is_warm_ever() const { return iz_; } GLMNETPP_STRONG_INLINE void set_warm_ever() const { iz_ = true; } GLMNETPP_STRONG_INLINE bool is_excluded(index_t j) const { return !strong_map()[j]; } GLMNETPP_STRONG_INLINE value_t beta(index_t k) const { return a_(k); } /* * Tries initial fit until either it did not converge or both converged and kkt conditions are all satisfied. * The only difference from naive/multi is that it does not do an initial check of whether max iter has reached. 
*/ template GLMNETPP_STRONG_INLINE constexpr bool initial_fit(InitialFitIntType f) const { return gaussian_naive_t::initial_fit([&]() { return this->has_reached_max_passes(); }, f); } GLMNETPP_STRONG_INLINE void update_dlx(index_t k, value_t diff) { base_t::update_dlx(diff, xv_(k)); } GLMNETPP_STRONG_INLINE void update_rsq(index_t k, value_t diff) { gaussian_t::update_rsq(rsq_, diff, gk_, xv_(k)); } protected: GLMNETPP_STRONG_INLINE auto& resid() { return r_; } GLMNETPP_STRONG_INLINE const auto& resid() const { return r_; } GLMNETPP_STRONG_INLINE const auto& weight() const { return v_; } GLMNETPP_STRONG_INLINE auto new_weight_sum() const { return xmz_; } template GLMNETPP_STRONG_INLINE void initialize(ComputeXVFType compute_xv_f) { base_t::compute_strong_map( abs_grad(), this->penalty(), strong_map(), alpha_, lmda_, prev_lmda_, [&](index_t j) { xv_(j) = compute_xv_f(j); }, [&](index_t j) { return !is_excluded(j) || !this->exclusion()[j]; }); } template GLMNETPP_STRONG_INLINE bool check_kkt(XVFType xv_f, AbsGradFType abs_grad_f) { auto skip_f = [&](index_t k) { return !is_excluded(k) || !this->exclusion()[k]; }; gaussian_naive_t::update_abs_grad(g_, abs_grad_f, skip_f); return gaussian_naive_t::check_kkt(g_, this->penalty(), ix_, l1_regul_, [&](index_t j) { xv_(j) = xv_f(j); }, skip_f); } GLMNETPP_STRONG_INLINE void update_beta(index_t k, value_t gk) { gk_ = gk; const auto& cl = this->endpts(); base_t::update_beta( a_(k), gk_, xv_(k), this->penalty()(k), cl(0,k), cl(1,k), l1_regul_, l2_regul_); } GLMNETPP_STRONG_INLINE auto update_intercept(value_t r_sum) { auto d = gaussian_naive_t::update_intercept( a0_, this->resid(), this->convg_measure(), intr_, r_sum, xmz_, v_); if (d) gaussian_t::update_rsq(rsq_, d, r_sum, xmz_); return d; } template GLMNETPP_STRONG_INLINE void construct(ComputeXVFType compute_xv_f, AbsGradFType abs_grad_f) { gaussian_naive_t::update_abs_grad(g_, abs_grad_f, [&](index_t j) { return !this->exclusion()[j]; }); 
base_t::for_each_with_skip(this->all_begin(), this->all_end(), [&](index_t j) { xv_(j) = compute_xv_f(j); }, [&](index_t j) { return is_excluded(j); }); } private: GLMNETPP_STRONG_INLINE auto& abs_grad() const { return g_; } GLMNETPP_STRONG_INLINE auto& strong_map() { return ix_; } GLMNETPP_STRONG_INLINE const auto& strong_map() const { return ix_; } value_t gk_ = 0.0; // caches current gradient const value_t lmda_; const value_t prev_lmda_; const value_t alpha_; const value_t l1_regul_; const value_t l2_regul_; const value_t xmz_; const bool intr_; index_t& iz_; value_t& rsq_; Eigen::Map r_; // scaled residual vector Eigen::Map xv_; // weighted variance of x-cols but not computed yet. // Compute on-the-fly as features enter strong set. Eigen::Map v_; // square-root of the weights. Eigen::Map a_; // coefficients value_t& a0_; // intercept Eigen::Map g_; // absolute gradient Eigen::Map ix_; // strong map }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_wls.hpp0000644000176200001440000000665114171551160030024 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointInternal< util::glm_type::gaussian, util::mode_type::wls, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianWLSBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianWLSBase< ValueType, IndexType, BoolType>; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::mat_t; public: template ElnetPointInternal( value_t alm0, value_t almc, value_t alpha, const XType& x, RType& r, XVType& xv, const VType& v, bool intr, const JUType& ju, const VPType& vp, const CLType& cl, index_t nx, value_t thr, index_t maxit, AType& a, value_t& aint, GType& g, IAType& ia, IYType& iy, index_t& iz, MMType& mm, index_t& nino, value_t& rsqc, index_t& nlp) : 
base_t(alm0, almc, alpha, r, xv, v, intr, ju, vp, cl, nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp) , X_(x.data(), x.rows(), x.cols()) { base_t::construct( [&](index_t j) { return compute_xv(j); }, [&](index_t j) { return compute_abs_grad(j); }); } template GLMNETPP_STRONG_INLINE void initialize(const PointPackType&) { base_t::initialize([&](index_t j) { return compute_xv(j); }); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType&) { base_t::update_beta(k, compute_grad(k)); } GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) { gaussian_naive_t::update_resid( this->resid(), beta_diff, X_.col(k).cwiseProduct(this->weight())); } GLMNETPP_STRONG_INLINE void update_intercept() { base_t::update_intercept(this->resid().sum()); } template GLMNETPP_STRONG_INLINE bool check_kkt(const PointPackType&) { return base_t::check_kkt( [&](index_t k) { return compute_xv(k); }, [&](index_t k) { return compute_abs_grad(k); }); } private: GLMNETPP_STRONG_INLINE value_t compute_xv(index_t k) const { return X_.col(k).array().square().matrix().dot(this->weight()); } GLMNETPP_STRONG_INLINE value_t compute_grad(index_t k) const { return base_t::compute_grad(this->resid(), X_.col(k)); } GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t k) const { return std::abs(compute_grad(k)); } Eigen::Map X_; // data matrix }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_naive.hpp0000644000176200001440000000525014171551160030313 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointInternal< util::glm_type::gaussian, util::mode_type::naive, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternal(value_t 
thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, YType& y, const XType& X, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , X_(X.data(), X.rows(), X.cols()) , y_(y.data(), y.size()) { base_t::construct([this](index_t k) { return compute_abs_grad(k); }); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { base_t::update_beta(k, pack.ab, pack.dem, compute_grad(k)); } GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) { base_t::update_resid(y_, beta_diff, X_.col(k)); } template GLMNETPP_STRONG_INLINE bool check_kkt(const PointPackType& pack) { return base_t::check_kkt(pack.ab, [this](index_t k) { return compute_abs_grad(k); }); } private: GLMNETPP_STRONG_INLINE value_t compute_grad(index_t k) const { return base_t::compute_grad(y_, X_.col(k)); } GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t k) const { return std::abs(compute_grad(k)); } using typename base_t::vec_t; using typename base_t::mat_t; Eigen::Map X_; // data matrix Eigen::Map y_; // scaled residual vector // Note: this is slightly different from sparse version residual vector. // Sparse one will not be scaled by sqrt(weights), but this one will. 
}; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_multi.hpp0000644000176200001440000000636214171551160030350 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointInternal< util::glm_type::gaussian, util::mode_type::multi, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianMultiBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianMultiBase< ValueType, IndexType, BoolType>; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternal(value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, value_t ys0, YType& y, const XType& X, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(thr, maxit, y.cols(), nx, nlp, ia, ys0, xv, vp, cl, ju, int_param) , X_(X.data(), X.rows(), X.cols()) , y_(y.data(), y.rows(), y.cols()) { base_t::construct([&](index_t k, auto& g) { return compute_abs_grad(k, g); }); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { base_t::update_beta(k, pack.l1_regul(), pack.l2_regul(), [&](index_t j, auto& g) { return compute_grad(j, g); }); } template GLMNETPP_STRONG_INLINE void update_resid(index_t k, const DiffType& beta_diff) { for (index_t j = 0; j < y_.cols(); ++j) { gaussian_naive_t::update_resid(y_.col(j), beta_diff(j), X_.col(k)); } } template GLMNETPP_STRONG_INLINE bool check_kkt(const PointPackType& pack) { return base_t::check_kkt(pack.l1_regul(), [&](index_t k, auto& g) { return compute_abs_grad(k, g); }); } private: template GLMNETPP_STRONG_INLINE void compute_grad(index_t k, GType&& g) const { g.noalias() = y_.transpose() * X_.col(k); } template GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t k, GType&& g) const { compute_grad(k, g); return g.norm(); } using 
typename base_t::vec_t; using typename base_t::mat_t; Eigen::Map X_; // data matrix Eigen::Map y_; // scaled residual vector // Note: this is slightly different from sparse version residual vector. // Sparse one will not be scaled by sqrt(weights), but this one will }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/binomial_base.hpp0000644000176200001440000014412014341220705030100 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { /* * Base class for internal implementation of Binomial elastic-net point solver. * This contains all the common interface and members across all versions of binomial: * - multi-class * - two-class * - sparse/dense */ template struct ElnetPointInternalBinomialBase : ElnetPointInternalNonLinearBase { private: using base_t = ElnetPointInternalNonLinearBase; protected: using state_t = util::control_flow; using typename base_t::vec_t; using typename base_t::ivec_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternalBinomialBase( bool isd, bool intr, index_t kopt, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, index_t no, index_t ni, value_t& dev0, const WType& w, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(thr, maxit, nx, nlp, intr, ia, dev0, vp, cl, ju) , isd_(isd) , kopt_(kopt) , pmin_(int_param.pmin) , vmin_((1.0+int_param.pmin) * int_param.pmin * (1.0-int_param.pmin)) , w_(w.data(), w.size()) {} constexpr GLMNETPP_STRONG_INLINE bool is_total_var_too_small() const { return xmz_ <= vmin_; } GLMNETPP_STRONG_INLINE auto deviance() const { return dev1_; } protected: GLMNETPP_STRONG_INLINE auto optimization_type() const { return kopt_; } GLMNETPP_STRONG_INLINE auto& new_weight_sum() { return xmz_; } GLMNETPP_STRONG_INLINE auto new_weight_sum() const { return xmz_; } GLMNETPP_STRONG_INLINE const auto& weight() const { return w_; } 
GLMNETPP_STRONG_INLINE auto& deviance() { return dev1_; } GLMNETPP_STRONG_INLINE auto do_standardize() const { return isd_; } GLMNETPP_STRONG_INLINE auto min_prob() const { return pmin_; } private: value_t xmz_ = 0.0; // TODO: technically not needed for multi-group lasso const bool isd_; // TODO: technically not needed for multi-group lasso const index_t kopt_; // TODO: technically not needed for multi-group lasso const value_t pmin_; const value_t vmin_; // TODO: technically not needed for multi-group lasso value_t dev1_ = 0.0; Eigen::Map w_; }; /* * Base class for Binomial all uni-response methods. */ template struct ElnetPointInternalBinomialUniBase : ElnetPointInternalBinomialBase { private: using base_t = ElnetPointInternalBinomialBase; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::vec_t; public: template ElnetPointInternalBinomialUniBase( bool isd, bool intr, index_t kopt, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, index_t no, index_t ni, value_t& dev0, const WType& w, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, no, ni, dev0, w, vp, cl, ju, int_param) , r_(no) , v_(no) {} protected: GLMNETPP_STRONG_INLINE auto& resid() { return r_; } GLMNETPP_STRONG_INLINE const auto& resid() const { return r_; } GLMNETPP_STRONG_INLINE auto& new_weight() { return v_; } GLMNETPP_STRONG_INLINE const auto& new_weight() const { return v_; } private: vec_t r_; vec_t v_; }; /* * Base class for Binomial two-class methods. 
*/ template struct ElnetPointInternalBinomialTwoClassBase : ElnetPointInternalBinomialUniBase { private: using base_t = ElnetPointInternalBinomialUniBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase; protected: using typename base_t::state_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternalBinomialTwoClassBase( bool isd, bool intr, index_t kopt, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const GType& g, value_t& dev0, const YType& y, const WType& w, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, y.size(), vp.size(), dev0, w, vp, cl, ju, int_param) , b_(vp.size() + 1) , xv_(vp.size()) , bs_(vp.size() + 1) , q_(y.size()) , fmax_(std::log(1.0 / int_param.pmin - 1.0)) , fmin_(-fmax_) , y_(y.data(), y.size()) , g_(g.data(), g.size()) {} template GLMNETPP_STRONG_INLINE void initialize(const PackType& p) { base_t::compute_strong_map( this->abs_grad(), this->penalty(), this->strong_map(), p.elastic_prop(), p.lmda(), p.prev_lmda(), [&](auto k) { return this->strong_map()[k] || !this->exclusion()[k]; }); } constexpr GLMNETPP_STRONG_INLINE auto class_begin() const { return util::counting_iterator(0); } constexpr GLMNETPP_STRONG_INLINE auto class_end() const { return util::counting_iterator(1); } GLMNETPP_STRONG_INLINE void update_dlx(index_t k, value_t beta_diff) { base_t::update_dlx(beta_diff, xv_(k)); } GLMNETPP_STRONG_INLINE value_t beta(index_t k) const { return b_(k+1); } GLMNETPP_STRONG_INLINE value_t intercept() const { return b_(0); } GLMNETPP_STRONG_INLINE const auto& q() const { return q_; } protected: using typename base_t::vec_t; using typename base_t::ivec_t; using typename base_t::mat_t; GLMNETPP_STRONG_INLINE auto& x_var() { return xv_; } GLMNETPP_STRONG_INLINE const auto& offset() const { return g_; } GLMNETPP_STRONG_INLINE value_t& beta(index_t k) { 
return b_(k+1); } GLMNETPP_STRONG_INLINE value_t& intercept() { return b_(0); } GLMNETPP_STRONG_INLINE void update_beta(index_t k, value_t gk, value_t l1_regul, value_t l2_regul) { const auto& cl = this->endpts(); base_t::update_beta( beta(k), gk, xv_(k), this->penalty()(k), cl(0,k), cl(1,k), l1_regul, l2_regul); } GLMNETPP_STRONG_INLINE auto update_intercept(value_t r_sum) { return gaussian_naive_t::update_intercept( intercept(), this->resid(), this->convg_measure(), this->has_intercept(), r_sum, this->new_weight_sum(), this->new_weight()); } GLMNETPP_STRONG_INLINE void setup_wls() { bs_(0) = b_(0); std::for_each(this->active_begin(), this->active_end(), [&](auto k) { bs_(k+1) = b_(k+1); }); } template GLMNETPP_STRONG_INLINE state_t update_irls_invariants(PredictFType predict_f) { auto& v = this->new_weight(); auto& r = this->resid(); const auto& w = this->weight(); // computes the linear prediction fi at x_i // and computes the corresponding probability prediction. // To make sure coefficients don't wander off, we set the probability to 0 or 1 // depending on how negative/positive fi is. 
for (index_t i = 0; i < q_.size(); ++i) { auto fi = predict_f(i); if (fi < fmin_) { q_(i) = 0.0; } else if (fi > fmax_) { q_(i) = 1.0; } else { q_(i) = 1.0/(1.0 + std::exp(-fi)); } } v.array() = w.array() * q_.array() * (1.0 - q_.array()); this->new_weight_sum() = v.sum(); if (this->is_total_var_too_small()) return state_t::break_; r.array() = w.array() * (y_-q_).array(); return state_t::noop_; } template GLMNETPP_STRONG_INLINE state_t update_irls_strong_set( GradFType grad_f, value_t l1_regul) { value_t diff0 = b_(0) - bs_(0); bool ix = base_t::has_converged_irls( this->new_weight_sum() * diff0 * diff0, [&](index_t k) { auto d = b_(k+1)-bs_(k+1); return xv_(k)*d*d; }); if (ix) { auto skip_f = [&](auto k) { return !this->is_excluded(k) || !this->exclusion()[k]; }; gaussian_naive_t::update_abs_grad( this->abs_grad(), [&](index_t k) { return std::abs(grad_f(k)); }, skip_f); bool kkt_passed = gaussian_naive_t::check_kkt( this->abs_grad(), this->penalty(), this->strong_map(), l1_regul, skip_f); if (kkt_passed) return state_t::break_; } return state_t::noop_; } // TODO: can this be generalized with the multi-class version? 
// NOTE(review): this archive extraction stripped the contents of most angle
// brackets (template parameter lists, Eigen::Map arguments, include paths).
// The code below reproduces the extracted text verbatim; restore the template
// arguments against upstream glmnetpp before attempting to compile.

// Initializes the two-class binomial solver state: intercept (closed form, or
// Newton via azero() when offsets are present), working weights/residuals,
// null deviance, convergence threshold, and the initial absolute-gradient
// screen over non-excluded features.
template 
void construct(XVFType xv_f, GradFType grad_f) {
    b_.setZero();
    bs_.setZero();
    auto& v = this->new_weight();
    auto& r = this->resid();
    const auto& w = this->weight();
    auto& ga = this->abs_grad();
    const auto& ju = this->exclusion();
    auto& xmz = this->new_weight_sum();
    auto& dev = this->deviance();
    auto& dev0 = this->null_deviance();
    // q0 = weighted mean response; must lie strictly inside (pmin, 1-pmin),
    // otherwise the response is (weighted) all-0 or all-1 and the fit is degenerate.
    auto q0 = w.dot(y_);
    if (q0 <= this->min_prob()) { throw util::prob_min_reached_error(0); }
    if (q0 >= 1.-this->min_prob()) { throw util::prob_max_reached_error(0); }
    if (!this->has_intercept()) q0 = 0.5;
    auto bz = 0.0;
    if (this->has_intercept()) bz = std::log(q0/(1.0-q0));  // log-odds intercept
    dev = 0.0; xmz = 0.0;
    if ((g_.array() == 0).all()) {
        // No offsets: intercept-only model has constant probability q0.
        auto vi = q0*(1.-q0);
        b_(0) = bz;
        v = vi * w;
        r.array() = w.array() * (y_.array()-q0);
        q_.array() = q0;
        xmz = vi;
        dev = -(bz*q0 + std::log(1.0-q0));
    } else {
        // Offsets present: solve the intercept by Newton iteration (azero).
        b_(0) = 0.0;
        if (this->has_intercept()) { b_(0) = azero(y_,g_,w); }
        q_.array() = 1.0 / (1.0 + (-b_(0)-g_.array()).exp());
        v.array() = w.array() * q_.array() * (1.0-q_.array());
        r.array() = w.array() * (y_-q_).array();
        xmz = v.sum();
        dev = -(b_(0)*q0 + w.dot( (y_.array()*g_.array() + (1.0-q_.array()).log()).matrix() ));
    }
    // if we approximate the Hessian with an upper bound for approximate Newton algorithms
    // (0.25 bounds p(1-p); scaled by the column variance xv_f(j) when not standardized).
    if (this->optimization_type() > 0) {
        if (this->do_standardize() && this->has_intercept()) { xv_.array() = 0.25; }
        else {
            for (index_t j = 0; j < xv_.size(); ++j) {
                if (ju[j]) { xv_(j) = 0.25 * xv_f(j); }
            }
        }
    }
    // Null deviance adds the saturated-model (entropy) terms of y itself.
    dev0 = dev;
    for (index_t i = 0; i < y_.size(); ++i) {
        if (y_(i) > 0.0) dev0 += w(i)*y_(i)*std::log(y_(i));
        if (y_(i) < 1.0) dev0 += w(i)*(1.0-y_(i))*std::log(1.0-y_(i));
    }
    // Convergence threshold is relative to the null deviance.
    this->set_thresh(this->thresh() * dev0);
    // Initial gradient screen over features not hard-excluded by ju.
    for (index_t j = 0; j < ga.size(); ++j) {
        if (!ju[j]) continue;
        ga(j) = std::abs(grad_f(j));
    }
}

private:
// Newton iteration for the intercept of an offset-only logistic model:
// returns az solving sum_i q_i * (y_i - p_i(az)) = 0 with
// p_i(az) = 1/(1 + exp(-az - g_i)); stops when |step| < 1e-7.
template 
GLMNETPP_STRONG_INLINE
static auto azero(
        const YType& y,
        const GType& g,
        const QType& q
        )
{
    auto n = y.size();
    Eigen::VectorXd e(n);
    Eigen::VectorXd p(n);
    Eigen::VectorXd w(n);
    auto az = 0.0;
    e.array() = (-g).array().exp();
    auto qy = q.dot(y);
    p.array() = 1./(1. + e.array());
    while (1) {
        w.array() = q.array() * p.array() * (1.0 - p.array());
        auto d = (qy - q.dot(p)) / w.sum();     // Newton step: score / information
        az += d;
        if (std::abs(d) < 1e-7) break;
        auto ea0 = std::exp(-az);
        p.array() = 1./(1. + ea0 * e.array());
    }
    return az;
}

vec_t b_;               // coefficients + intercept at index 0
vec_t xv_;              // new weighted variance of columns of x
vec_t bs_;              // old coefficients + intercept at index 0
vec_t q_;               // probability predictions
const value_t fmax_;    // max linear prediction
const value_t fmin_;    // min linear prediction
Eigen::Map y_;          // original y response
Eigen::Map g_;          // offsets
};

// ========================================================================
// Dense Multi-class Base classes
// ========================================================================

/*
 * Base class for Binomial all multi-response methods.
 */
template 
struct ElnetPointInternalBinomialMultiBase
        : ElnetPointInternalBinomialBase
{
private:
using base_t = ElnetPointInternalBinomialBase;

protected:
using typename base_t::value_t;
using typename base_t::index_t;
using typename base_t::state_t;
using typename base_t::mat_t;
using typename base_t::vec_t;

public:
// Wires up maps over the caller-owned response (y) and offset (g) matrices
// and allocates per-class coefficient / probability state.
template 
ElnetPointInternalBinomialMultiBase(
        bool isd, bool intr, index_t kopt, value_t thr,
        index_t maxit, index_t nx, index_t& nlp, IAType& ia,
        GType& g, value_t& dev0,
        const YType& y, const WType& w, const VPType& vp,
        const CLType& cl, const JUType& ju,
        const IntParamType& int_param)
    : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia,
             y.rows(), vp.size(), dev0, w, vp, cl, ju, int_param)
    , nc_(y.cols())
    , exmx_(int_param.exmx)
    , exmn_(-exmx_)
    , emin_(int_param.pmin / (1.0 - int_param.pmin))
    , emax_(1.0 / emin_)
    , b_(vp.size() + 1, y.cols())
    , bs_(vp.size() + 1, y.cols())
    , q_(y.rows(), y.cols())
    , sxp_(y.rows())
    , y_(y.data(), y.rows(), y.cols())
    , g_(g.data(), g.rows(), g.cols())
{
    b_.setZero();
    bs_.setZero();
    sxp_.setZero();
}

GLMNETPP_STRONG_INLINE const auto& q() const { return q_; }
GLMNETPP_STRONG_INLINE const auto& sxp() const { return sxp_; }
// Iterators over class indices [0, nc_).
GLMNETPP_STRONG_INLINE auto class_begin() const { return util::counting_iterator(0); }
GLMNETPP_STRONG_INLINE auto class_end() const { return util::counting_iterator(nc_); }
// A feature is "excluded" while it sits outside the strong (candidate) set.
GLMNETPP_STRONG_INLINE bool is_excluded(index_t k) const { return !this->strong_map()[k]; }
GLMNETPP_STRONG_INLINE auto n_classes() const { return nc_; }
// Coefficient of feature k for class ic; row 0 of b_ holds the intercepts.
GLMNETPP_STRONG_INLINE value_t beta(index_t k, index_t ic) const { return b_(k+1, ic); }
GLMNETPP_STRONG_INLINE value_t intercept(index_t ic) const { return b_(0, ic); }

protected:
GLMNETPP_STRONG_INLINE auto& q() { return q_; }
// Clamp bounds for linear predictions and for relative mean predictions.
GLMNETPP_STRONG_INLINE value_t log_mean_pred_max() const { return exmx_; }
GLMNETPP_STRONG_INLINE value_t log_mean_pred_min() const { return exmn_; }
GLMNETPP_STRONG_INLINE value_t mean_min() const { return emin_; }
GLMNETPP_STRONG_INLINE value_t mean_max() const { return emax_; }
GLMNETPP_STRONG_INLINE const auto& y() const { return y_; }
GLMNETPP_STRONG_INLINE auto& sxp() { return sxp_; }
GLMNETPP_STRONG_INLINE const auto& offset() const { return g_; }
GLMNETPP_STRONG_INLINE auto& beta() { return b_; }
GLMNETPP_STRONG_INLINE const auto& beta() const { return b_; }
GLMNETPP_STRONG_INLINE auto& old_beta() { return bs_; }
GLMNETPP_STRONG_INLINE const auto& old_beta() const { return bs_; }

// Helper routine to initialize residual.
// This is specific to multi-class binomial.
template 
GLMNETPP_STRONG_INLINE
void initialize_resid(
        RType&& r,
        const Eigen::MatrixBase& y,
        const Eigen::MatrixBase& v)
{
    r = this->weight().cwiseProduct(y - v);
}

// Same as above but scales the residual by 1/scale.
template 
GLMNETPP_STRONG_INLINE
void initialize_resid(
        RType&& r,
        const Eigen::MatrixBase& y,
        const Eigen::MatrixBase& v,
        value_t scale)
{
    r = this->weight().cwiseProduct(y - v) / scale;
}

// Residual for class ic: fitted probability is q_[:,ic] / sxp_.
template 
GLMNETPP_STRONG_INLINE
void initialize_resid(
        index_t ic,
        RType&& r)
{
    initialize_resid(r, y_.col(ic), q_.col(ic).cwiseQuotient(sxp_));
}

template 
GLMNETPP_STRONG_INLINE
void initialize_resid(
        index_t ic,
        RType&& r,
        value_t scale)
{
    initialize_resid(r, y_.col(ic), q_.col(ic).cwiseQuotient(sxp_), scale);
}

// Recomputes the strong (ever-active candidate) set for a new lambda.
template 
GLMNETPP_STRONG_INLINE
void initialize(const PackType& p) {
    base_t::compute_strong_map(
            this->abs_grad(), this->penalty(), this->strong_map(),
            p.elastic_prop(), p.lmda(), p.prev_lmda(),
            [&](auto k) { return !this->is_excluded(k) || !this->exclusion()[k]; });
}

// TODO: this is probably generalizable with two-class.
// Initializes intercepts, un-normalized class probabilities q_, the
// normalizer sxp_, and the null deviance for the multi-class fit.
GLMNETPP_STRONG_INLINE
void construct() {
    auto no = y_.rows();
    auto nc = y_.cols();
    auto& dev0 = this->null_deviance();
    auto& dev = this->deviance();
    const auto& w = this->weight();
    dev0 = 0.0;
    // NOTE(review): in the intercept branch below dev is accumulated before
    // being reset in this function -- presumably dev is zero-initialized
    // upstream; confirm against the base-class state.
    for (index_t ic = 0; ic < nc; ++ic) {
        auto q0 = w.dot(y_.col(ic));
        // Each class must have weighted prevalence strictly inside (pmin, 1-pmin).
        if (q0 <= this->min_prob()) { throw util::prob_min_reached_error(ic); }
        if (q0 >= 1.0 - this->min_prob()) { throw util::prob_max_reached_error(ic); }
        if (!this->has_intercept()) { q0 = 1.0 / nc; b_(0, ic) = 0.0; }
        else {
            b_(0, ic) = std::log(q0);
            dev -= q0 * b_(0, ic);
        }
    }
    if (!this->has_intercept()) dev = std::log(nc);
    if ((g_.array() == 0).all()) {
        // No offsets: center the intercepts and set constant class probabilities.
        b_.row(0).array() -= b_.row(0).sum() / nc;
        for (index_t ic = 0; ic < nc; ++ic) {
            q_.col(ic).array() = std::exp(b_(0, ic));
            sxp_ += q_.col(ic);
        }
    } else {
        // Offsets present: row-center the offsets, solve intercepts by Newton
        // iteration (kazero), and accumulate the offset contribution to dev.
        for (index_t i = 0; i < no; ++i) {
            g_.row(i).array() -= g_.row(i).sum() / nc;
        }
        if (this->has_intercept()) kazero(b_.row(0));
        dev = 0.0;
        for (index_t ic = 0; ic < nc; ++ic) {
            q_.col(ic).array() = b_(0,ic) + g_.col(ic).array();
            dev -= w.dot( (y_.col(ic).array() * q_.col(ic).array()).matrix() );
            q_.col(ic).array() = q_.col(ic).array().exp();
            sxp_ += q_.col(ic);
        }
        vec_t sxpl = (w.array() * sxp_.array().log()).matrix();
        for (index_t ic = 0; ic < nc; ++ic) {
            dev += y_.col(ic).dot(sxpl);
        }
    }
    // Saturated-model (entropy) correction to the null deviance.
    for (index_t ic = 0; ic < nc; ++ic) {
        for (index_t i = 0; i < no; ++i) {
            if (y_(i,ic) > 0) dev0 += w(i) * y_(i,ic) * std::log(y_(i,ic));
        }
    }
    dev0 += dev;
    this->set_thresh(this->thresh() * dev0);
}

// Re-runs the strong rule at l1_regul; returns true if the strong set grew.
GLMNETPP_STRONG_INLINE
bool update_strong_map(value_t l1_regul) {
    return base_t::compute_strong_map(
            this->abs_grad(), this->penalty(), this->strong_map(), l1_regul,
            [&](auto k) { return this->strong_map()[k] || !this->exclusion()[k]; });
}

// IRLS update for one class: rebuild the (clamped) linear prediction and the
// un-normalized probability column q, keeping the normalizer sxp consistent.
template 
GLMNETPP_STRONG_INLINE
void update_irls_class(
        PredBuffType&& pred_buff,
        value_t intr,
        const OffsetType& offset,
        QType&& q,
        UpdatePredictionFType update_prediction_f)
{
    pred_buff.array() = intr + offset.array();
    update_prediction_f(pred_buff);
    pred_buff.array() = pred_buff.array().max(this->log_mean_pred_min()).min(this->log_mean_pred_max());
    this->sxp() -= q;
    // Clamp q to [emin_, emax_] times the remaining normalizer for stability.
    q.array() = (this->mean_min() * this->sxp().array()).max(
            pred_buff.array().exp()).min(
            this->mean_max() * this->sxp().array());
    this->sxp() += q;
}

private:
// Block-coordinate Newton iterations for the per-class intercepts of the
// offset-only multinomial model; iterates until the largest step < 1e-7,
// then centers the intercepts to remove the softmax shift ambiguity.
template 
GLMNETPP_STRONG_INLINE
auto kazero(AZType&& az) {
    const auto& w = this->weight();
    az.setZero();
    mat_t e = this->offset().array().exp().matrix();
    vec_t s = e.rowwise().sum();
    double dm;
    auto n = this->y().rows();
    auto kk = this->y().cols();
    do {
        dm = 0.0;
        for (index_t k = 0; k < kk; ++k) {
            auto t = 0.0;
            auto u = 0.0;
            for (index_t i = 0; i < n; ++i) {
                auto pik = e(i,k)/s(i);
                t += w(i) * (y_(i,k) - pik);
                u += w(i) * pik * (1.0-pik);
            }
            auto d = t/u;           // Newton step for class-k intercept
            az(k) += d;
            auto ed = std::exp(d);
            dm = std::max(dm, std::abs(d));
            // Fold the step into the exponential terms and row sums.
            for (index_t i = 0; i < n; ++i) {
                auto z = e(i,k);
                e(i,k) = z * ed;
                s(i) += -z + e(i,k);
            }
        }
    } while(dm >= 1e-7);
    az.array() -= az.sum() / kk;
}

const index_t nc_;      // number of classes
const value_t exmx_;    // max linear prediction
const value_t
exmn_;                  // min linear prediction
const value_t emin_;    // min probability prediction
const value_t emax_;    // max probability prediction
mat_t b_;       // matrix of coefficients with intercepts at row 0
mat_t bs_;      // matrix of old coefficients with intercepts at row 0
mat_t q_;       // matrix of probability predictions
vec_t sxp_;     // sum of exponential terms to normalize the probabilities
Eigen::Map y_;  // original y response
Eigen::Map g_;  // offsets
};

/*
 * Base class for Binomial multi-class (non-group lasso) methods.
 */
template 
struct ElnetPointInternalBinomialMultiClassBase
        : ElnetPointInternalBinomialMultiBase
{
private:
using base_t = ElnetPointInternalBinomialMultiBase;
using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase;

protected:
using typename base_t::state_t;
using typename base_t::vec_t;
using typename base_t::ivec_t;
using typename base_t::mat_t;

public:
using typename base_t::value_t;
using typename base_t::index_t;
using typename base_t::bool_t;

// The *_ic_ members are re-seatable Eigen::Map views onto the column of the
// class currently being solved (re-seated in setup_wls); they start null.
template 
ElnetPointInternalBinomialMultiClassBase(
        bool isd, bool intr, index_t kopt, value_t thr,
        index_t maxit, index_t nx, index_t& nlp, IAType& ia,
        GType& g, value_t& dev0,
        const YType& y, const WType& w, const VPType& vp,
        const CLType& cl, const JUType& ju,
        ISType& is, const IntParamType& int_param)
    : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, g, dev0, y, w, vp, cl, ju, int_param)
    , xv_(vp.size(), y.cols())
    , di_(y.rows())
    , r_(y.rows())
    , v_(y.rows())
    , pfm_((1.0 + int_param.pmin) * int_param.pmin)
    , pfx_((1.0 - int_param.pmin) * (1.0 - int_param.pmin))
    , is_(is.data(), is.size())
    , bs_ic_(nullptr, 0)
    , b_ic_(nullptr, 0)
    , q_ic_(nullptr, 0)
    , y_ic_(nullptr, 0)
    , xv_ic_(nullptr, 0)
    , g_ic_(nullptr, 0)
{}

using base_t::beta;

template 
GLMNETPP_STRONG_INLINE
void initialize(const PackType& p) {
    base_t::initialize(p);
    ig_ = false;    // no class has produced a usable WLS problem yet
}

GLMNETPP_STRONG_INLINE bool has_skipped_all_classes() const { return !ig_; }
GLMNETPP_STRONG_INLINE void reset_converged() { ix_ = false; }

GLMNETPP_STRONG_INLINE void update_dlx(index_t k, value_t beta_diff) {
    base_t::update_dlx(beta_diff, xv_ic_(k));
}

// Coefficient k of the class currently viewed (index 0 is the intercept).
GLMNETPP_STRONG_INLINE value_t& beta(index_t k) { return b_ic_(k+1); }
GLMNETPP_STRONG_INLINE value_t beta(index_t k) const { return b_ic_(k+1); }

protected:
using base_t::initialize_resid;

GLMNETPP_STRONG_INLINE auto& resid() { return r_; }
GLMNETPP_STRONG_INLINE const auto& resid() const { return r_; }
GLMNETPP_STRONG_INLINE auto& curr_x_var() { return xv_ic_; }
GLMNETPP_STRONG_INLINE auto curr_intercept() const { return b_ic_(0); }
GLMNETPP_STRONG_INLINE auto& curr_intercept() { return b_ic_(0); }
GLMNETPP_STRONG_INLINE auto& curr_q() { return q_ic_; }
GLMNETPP_STRONG_INLINE auto& curr_offset() { return g_ic_; }
GLMNETPP_STRONG_INLINE auto& prediction_buffer() { return di_; }
GLMNETPP_STRONG_INLINE auto& new_weight() { return v_; }
GLMNETPP_STRONG_INLINE const auto& new_weight() const { return v_; }

GLMNETPP_STRONG_INLINE void initialize_resid(index_t ic) {
    base_t::initialize_resid(ic, this->resid());
}

GLMNETPP_STRONG_INLINE auto update_intercept(value_t r_sum) {
    return gaussian_naive_t::update_intercept(
            curr_intercept(), this->resid(), this->convg_measure(),
            this->has_intercept(), r_sum, this->new_weight_sum(),
            this->new_weight());
}

GLMNETPP_STRONG_INLINE void update_beta(index_t k, value_t gk, value_t l1_regul, value_t l2_regul) {
    const auto& cl = this->endpts();
    base_t::update_beta(
            beta(k), gk, xv_ic_(k), this->penalty()(k),
            cl(0,k), cl(1,k), l1_regul, l2_regul);
}

// Prepares the weighted-least-squares subproblem for class ic: re-seats the
// per-class views, snapshots old coefficients, and builds the working
// weights/residuals (probabilities outside [pfm_, pfx_] are rounded to 0/1
// and get zero weight).  Returns continue_ when total variance is too small.
GLMNETPP_STRONG_INLINE state_t setup_wls(size_t ic) {
    const auto& g = this->offset();
    const auto& y = this->y();
    auto& q = this->q();
    auto& b = this->beta();
    auto& bs = this->old_beta();
    const auto& sxp = this->sxp();

    // set the viewers to current class
    new (&bs_ic_) Eigen::Map (bs.col(ic).data(), bs.rows());
    new (&b_ic_ ) Eigen::Map (b.col(ic).data(), b.rows());
    new (&q_ic_ ) Eigen::Map (q.col(ic).data(), q.rows());
    new (&y_ic_ ) Eigen::Map(y.col(ic).data(), y.rows());
    new (&xv_ic_) Eigen::Map (xv_.col(ic).data(), xv_.rows());
    new (&g_ic_ ) Eigen::Map(g.col(ic).data(), g.rows());

    auto& v = this->new_weight();
    auto& xmz = this->new_weight_sum();
    const auto& w = this->weight();

    // do some setup
    bs_ic_(0) = b_ic_(0);
    std::for_each(this->active_begin(), this->active_end(),
            [&](auto i) { bs_ic_(i+1) = b_ic_(i+1); });
    xmz = 0.0;
    for (index_t i = 0; i < y_ic_.size(); ++i) {
        auto pic = q_ic_(i) / sxp(i);
        if (pic < pfm_) { pic = 0.0; v(i) = 0.0; }
        else if (pic > pfx_) { pic = 1.0; v(i) = 0.0; }
        else {
            v(i) = w(i) * pic * (1.0 - pic);
            xmz += v(i);
        }
        this->resid()(i) = w(i) * (y_ic_(i) - pic);
    }
    if (this->is_total_var_too_small()) return state_t::continue_;
    ig_ = true;
    return state_t::noop_;
}

// One IRLS sweep after all classes were visited: centers intercepts, applies
// the elc() identifiability shift to each active feature's coefficient row,
// rescales predictions/probabilities, and re-checks the strong rule.
template 
GLMNETPP_STRONG_INLINE
state_t update_irls(
        value_t elastic_prop,
        value_t l1_regul,
        UpdateYPredFType update_y_pred_f,
        UpdatePPredFType update_p_pred_f,
        InitResidFType init_resid_f,
        ComputeGradFType compute_grad_f)
{
    auto beta = elastic_prop;
    auto ab = l1_regul;
    auto& y = this->y();
    auto& b = this->beta();
    auto& sxp = this->sxp();
    auto& q = this->q();
    auto nc = y.cols();
    auto s = -b.row(0).sum() / nc;
    b.row(0).array() += s;
    di_.array() = s;
    // Bug fix: necessary to subtract since otherwise the next loop does 1 too many iterations.
    // Original Fortran code has a memory issue here.
    // TODO: honestly, this part of the code should not be executed when max active is reached.
    // It should just throw to the top-level caller (elnet_path) and handle there.
    auto begin = this->active_begin();
    auto end = this->active_end();
    if (this->has_reached_max_active()) --end;
    std::for_each(begin, end,
            [&](auto l) {
                if (this->penalty()(l) <= 0) { s = b.row(l+1).sum()/nc; }
                else { s = elc(beta, this->endpts().col(l), b.row(l+1)); }
                b.row(l+1).array() -= s;
                update_y_pred_f(l, s, di_);
            });
    update_p_pred_f(di_);
    sxp.array() *= di_.array();
    std::for_each(this->class_begin(), this->class_end(),
            [&](index_t ic) { q.col(ic).array() *= di_.array(); });
    if (this->has_reached_max_active()) throw util::max_active_reached_error();
    if (!ig_) return state_t::break_;
    if (!has_some_class_not_converged()) {
        update_abs_grad(init_resid_f, compute_grad_f);
        ix_ = base_t::update_strong_map(ab);
        if (!has_some_class_not_converged()) return state_t::break_;
    }
    return state_t::noop_;
}

template 
GLMNETPP_STRONG_INLINE
void update_irls_class(
        UpdatePredictionFType update_prediction_f)
{
    base_t::update_irls_class(
            di_, this->curr_intercept(), this->curr_offset(),
            this->curr_q(), update_prediction_f);
}

GLMNETPP_STRONG_INLINE void has_converged_irls_class() {
    // only check for convergence of current class if all previous classes seemed to have converged.
    if (!has_some_class_not_converged()) {
        auto d = bs_ic_(0) - b_ic_(0);
        ix_ = !base_t::has_converged_irls(
                this->new_weight_sum() * d * d,
                [&](index_t k) {
                    auto d = b_ic_(k+1) - bs_ic_(k+1);
                    return xv_ic_(k) * d * d;
                });
    }
}

// Full construction: base init plus the upper-bound Hessian (0.25 rule)
// and the initial absolute-gradient screen.
template 
void construct(
        XVFType xv_f,
        InitResidFType init_resid_f,
        ComputeGradFType compute_grad_f)
{
    base_t::construct();
    if (this->optimization_type() > 0) {
        if (this->do_standardize() && this->has_intercept()) { xv_.array() = 0.25; }
        else {
            base_t::for_each_with_skip(this->all_begin(), this->all_end(),
                    [&](index_t j) { xv_.row(j).array() = 0.25 * xv_f(j); },
                    [&](index_t j) { return !this->exclusion()[j]; });
        }
    }
    update_abs_grad(init_resid_f, compute_grad_f);
}

private:
GLMNETPP_STRONG_INLINE bool has_some_class_not_converged() const { return ix_; }

// For every feature outside the strong set, store the largest |gradient|
// across classes (used by the strong-rule screen).
template 
GLMNETPP_STRONG_INLINE
void update_abs_grad(
        InitResidFType init_resid_f,
        ComputeGradFType compute_grad_f)
{
    auto& ga = this->abs_grad();
    auto skip_f = [&](auto k) { return this->strong_map()[k] || !this->exclusion()[k]; };
    base_t::for_each_with_skip(this->all_begin(), this->all_end(),
            [&](auto k) { ga(k) = 0.; }, skip_f);
    std::for_each(this->class_begin(), this->class_end(),
            [&](auto ic) {
                init_resid_f(ic);
                base_t::for_each_with_skip(this->all_begin(), this->all_end(),
                        [&](index_t k) { ga(k) = std::max(ga(k), std::abs(compute_grad_f(k))); },
                        skip_f);
            });
}

// Identifiability shift for one feature's coefficient row a (across classes):
// parm == 0 gives the mean, parm == 1 the median, otherwise an elastic-net-
// weighted compromise between them; finally clamped so the shifted
// coefficients respect the box constraints cl.
template 
GLMNETPP_STRONG_INLINE
auto elc(value_t parm, const CLType& cl, const AType& a) {
    auto n = a.size();
    auto am = a.sum()/n;
    auto out = am;
    if (parm && (n != 2)) {
        // Note: it is VERY important that we take head(n).
        // Otherwise, is_ is reshaped to size n, which is undefined behavior for the caller.
        is_.head(n) = Eigen::VectorXi::LinSpaced(n, 0, n-1);
        std::sort(is_.data(), is_.data() + n,
                [&](size_t i, size_t j) { return a[i] < a[j]; });
        if (a(is_(0)) == a(is_(n-1))) { out = a(0); }
        else {
            double ad = 0.0;
            if (n % 2 == 1) { ad = a(is_(n/2)); }
            else { ad = 0.5*(a(is_(n/2))+a(is_(n/2-1))); }
            if (parm == 1.0) { out = ad; }
            else {
                auto b1 = std::min(am, ad);
                auto b2 = std::max(am, ad);
                auto k2=1;
                while (a(is_(k2-1)) <= b1) { ++k2; }
                auto k1 = k2-1;
                while (a(is_(k2-1)) < b2) { ++k2; }
                auto r = parm/((1.0-parm)*n);
                auto is = 0;
                auto sm = n-2*(k1-1);
                auto s = 0.0;
                // Search for the minimizer between the mean and the median.
                for (int k = k1; k < k2; ++k) {
                    sm -= 2;
                    s = r * sm + am;
                    if (s > a(is_(k-1)) && s <= a(is_(k))) { is = k; break; }
                }
                if (is) { out = s; }
                else {
                    // No interior minimizer: evaluate the criterion at each
                    // sorted coefficient and keep the smallest.
                    auto r2 = 2.0 * r;
                    auto s1 = a(is_(k1-1));
                    auto am2 = 2.0 * am;
                    auto cri = r2 * (a.array()-s1).abs().sum() + s1*(s1-am2);
                    out = s1;
                    for (int k = k1+1; k < k2+1; ++k) {
                        s = a(is_(k-1));
                        if (s == s1) continue;
                        auto c = r2 * (a.array()-s).abs().sum() + s*(s-am2);
                        if (c < cri) { cri = c; out = s; }
                        s1 = s;
                    }
                }
            }
        }
    }
    out = std::max((a.array()-cl(1)).maxCoeff(),
            std::min((a.array()-cl(0)).minCoeff(), out) );
    return out;
}

mat_t xv_;      // matrix of weighted variance of columns of x (note: each class has different weights)
vec_t di_;      // buffer of length(n) used for both linear predictions and probability predictions
vec_t r_;       // residual
vec_t v_;       // new weights
const value_t pfm_;     // min probability predictions
const value_t pfx_;     // max probability predictions
bool ig_ = false;       // true if all classes were skipped in WLS
bool ix_ = false;       // true if some class did not converge
Eigen::Map is_;         // a buffer used only for elc
// current views of column for given class ic (see set_class())
Eigen::Map bs_ic_;
Eigen::Map b_ic_;
Eigen::Map q_ic_;
Eigen::Map y_ic_;
Eigen::Map xv_ic_;
Eigen::Map g_ic_;
};

/*
 * Base class for Binomial multi-class group lasso methods.
 */
template 
struct ElnetPointInternalBinomialMultiClassGroupBase
        : ElnetPointInternalBinomialMultiBase
{
private:
using base_t = ElnetPointInternalBinomialMultiBase;
using gaussian_multi_t = ElnetPointInternalGaussianMultiBase;

protected:
using typename base_t::state_t;
using typename base_t::mat_t;
using typename base_t::vec_t;

public:
using typename base_t::value_t;
using typename base_t::index_t;
using typename base_t::bool_t;

// Group-lasso variant updates whole coefficient rows (one per feature,
// across all classes); isd/kopt are irrelevant here and passed as fixed values.
template 
ElnetPointInternalBinomialMultiClassGroupBase(
        bool intr, value_t thr,
        index_t maxit, index_t nx, index_t& nlp, IAType& ia,
        GType& g, value_t& dev0,
        const YType& y, const WType& w, const XVType& xv,
        const VPType& vp, const CLType& cl, const JUType& ju,
        const IntParamType& int_param)
    : base_t(true /* isd not used */, intr, 2 /* kopt not used */,
             thr, maxit, nx, nlp, ia, g, dev0, y, w, vp, cl, ju, int_param)
    , bnorm_thr_(int_param.bnorm_thr)
    , bnorm_mxit_(int_param.bnorm_mxit)
    , eps_(int_param.eps)
    , xv_(xv.data(), xv.size())
    , r_(y.rows(), y.cols())
    , g_next_(y.cols())
    , del_(y.cols())
    , isc_(y.cols())
    , sc_(y.rows())
{}

using base_t::initialize;
using base_t::beta;
using base_t::intercept;

constexpr GLMNETPP_STRONG_INLINE bool has_skipped_all_classes() const { return false; }
// One pass here touches every class column, so count r_.cols() passes.
GLMNETPP_STRONG_INLINE void increment_passes() { base_t::passes() += r_.cols(); }
constexpr GLMNETPP_STRONG_INLINE void update_intercept() const {}
GLMNETPP_STRONG_INLINE auto& beta_buffer() { return del_; }

template 
GLMNETPP_STRONG_INLINE
void update_dlx(index_t k, const DiffType& diff) {
    base_t::update_dlx(diff, xv_(k));
}

protected:
GLMNETPP_STRONG_INLINE auto& resid() { return r_; }
GLMNETPP_STRONG_INLINE const auto& resid() const { return r_; }
GLMNETPP_STRONG_INLINE auto intercept() { return this->beta().row(0); }
GLMNETPP_STRONG_INLINE auto intercept() const { return this->beta().row(0); }
// Row k+1 of the coefficient matrix viewed as a per-class vector.
GLMNETPP_STRONG_INLINE auto beta(index_t k) { return base_t::beta().row(k+1).transpose(); }
GLMNETPP_STRONG_INLINE auto beta(index_t k) const { return base_t::beta().row(k+1).transpose(); }

GLMNETPP_STRONG_INLINE void initialize_resid(index_t ic) {
    base_t::initialize_resid(ic, r_.col(ic));
}
GLMNETPP_STRONG_INLINE void initialize_resid(index_t ic, value_t scale) {
    base_t::initialize_resid(ic, r_.col(ic), scale);
}

template 
GLMNETPP_STRONG_INLINE
void construct(
        InitResidFType init_resid_f,
        ComputeAbsGradFType compute_abs_grad_f)
{
    base_t::construct();
    update_abs_grad(init_resid_f, compute_abs_grad_f);
}

// Group update of the whole coefficient row k via the Gaussian
// multi-response solver, using the variance-scaled penalties from setup_wls.
template 
GLMNETPP_STRONG_INLINE
void update_beta(index_t k, const ComputeGradFType& compute_grad_f) {
    gaussian_multi_t::update_beta(k, beta(k), xv_(k), this->penalty()(k),
            g_next_, g_next_, l1_regul_scaled_, l2_regul_scaled_,
            bnorm_thr_, bnorm_mxit_, isc_,
            [&](auto i, auto) { return this->endpts()(i,k); },
            compute_grad_f);
}

// Majorizes the multinomial Hessian by a scalar bound t (2x the largest
// per-observation variance term), rescales the l1/l2 penalties by t,
// snapshots coefficients, and initializes per-class residuals scaled by t.
template 
GLMNETPP_STRONG_INLINE
void setup_wls(
        value_t l1_regul,
        value_t l2_regul,
        InitResidFType init_resid_f)
{
    const auto& q = this->q();
    const auto& sxp = this->sxp();
    value_t t = 0.0;
    for (int ic = 0; ic < q.cols(); ++ic) {
        t = std::max(t,
                (q.col(ic).array()*
                 (1.0-q.col(ic).array()/sxp.array())/
                 sxp.array()).maxCoeff() );
    }
    if (t < eps_) throw util::below_min_variance_error();
    t *= 2.0;
    l1_regul_scaled_ = l1_regul / t;
    l2_regul_scaled_ = l2_regul / t;
    this->old_intercept() = this->intercept();
    std::for_each(this->active_begin(), this->active_end(),
            [&](index_t k) { this->old_beta(k).noalias() = this->beta(k); });
    for (int ic = 0; ic < r_.cols(); ++ic) { init_resid_f(ic, t); }
}

// IRLS step: convergence is tested on the largest per-class coefficient
// change; then predictions/probabilities are refreshed for every class,
// intercepts centered, and (on convergence) the strong set possibly grown.
template 
GLMNETPP_STRONG_INLINE
state_t update_irls(
        value_t l1_regul,
        UpdatePredictionFType update_prediction_f,
        InitResidFType init_resid_f,
        ComputeAbsGradFType compute_abs_grad_f)
{
    value_t int_diff = (this->intercept() - this->old_intercept()).array().abs().maxCoeff();
    bool ix = base_t::has_converged_irls(
            int_diff * int_diff,
            [&](index_t k) {
                value_t b_diff = (this->beta(k) - this->old_beta(k)).array().abs().maxCoeff();
                return b_diff * b_diff * xv_(k);
            });
    std::for_each(this->class_begin(), this->class_end(),
            [&](index_t ic) {
                base_t::update_irls_class(
                        sc_, intercept()(ic), this->offset().col(ic), this->q().col(ic),
                        [&](auto& sc) { update_prediction_f(ic, sc); });
            });
    intercept().array() -= intercept().sum() / this->n_classes();
    // if IRLS converged, check the strong rule if any additional features should be added.
    if (ix) {
        update_abs_grad(init_resid_f, compute_abs_grad_f);
        ix = base_t::update_strong_map(l1_regul);
        // if no update to strong map, done.
        if (!ix) return state_t::break_;
    }
    return state_t::noop_;
}

private:
GLMNETPP_STRONG_INLINE auto old_intercept() { return base_t::old_beta().row(0); }
GLMNETPP_STRONG_INLINE auto old_intercept() const { return base_t::old_beta().row(0); }
GLMNETPP_STRONG_INLINE auto old_beta(index_t k) { return base_t::old_beta().row(k+1).transpose(); }
GLMNETPP_STRONG_INLINE auto old_beta(index_t k) const { return base_t::old_beta().row(k+1).transpose(); }

/*
 * Updates absolute gradient quantity.
 * Note that the implementation detail is slightly different from ElnetPointInternalBinomialMultiClassBase.
 * This is slightly more efficient since we can save the residual _matrix_ and not just a vector.
 */
template 
GLMNETPP_STRONG_INLINE
void update_abs_grad(
        InitResidFType init_resid_f,
        ComputeAbsGradFType compute_abs_grad_f)
{
    std::for_each(this->class_begin(), this->class_end(), init_resid_f);
    auto& ga = this->abs_grad();
    auto skip_f = [&](index_t j) { return !this->is_excluded(j) || !this->exclusion()[j]; };
    // TODO: I believe this is the correct implementation
    base_t::for_each_with_skip(this->all_begin(), this->all_end(),
            [&](index_t j) { ga(j) = compute_abs_grad_f(j, g_next_); },
            skip_f);
    // TODO: But this is how it's done in fortran...
    //base_t::for_each_with_skip(this->all_begin(), this->all_end(),
    //    [&](index_t j) { auto gj = compute_abs_grad_f(j, g_next_); ga(j) = gj*gj; },
    //    skip_f);
    //ga.array() = ga.array().sqrt();
}

const value_t bnorm_thr_;       // internal parameter (copied in ctor) for bnorm threshold convergence.
const index_t bnorm_mxit_;      // internal parameter (copied in ctor) for bnorm max iteration.
value_t eps_;                   // min variance
value_t l1_regul_scaled_ = 0.0; // l1 regularization (lmda * elastic_prop) scaled by some variance quantity.
value_t l2_regul_scaled_ = 0.0; // l2 regularization (lmda * elastic_prop) scaled by some variance quantity.
Eigen::Map xv_;     // weighted variance of each column of x
mat_t r_;           // residual matrix
vec_t g_next_;      // buffer for storing gradient
vec_t del_;         // difference in current beta (row of matrix)
vec_t isc_;         // buffer only needed for chkbnds
vec_t sc_;          // buffer for storing objective values
};

// ========================================================================
// Sparse Base classes
// ========================================================================

/*
 * Base class for sparse Binomial methods.
 * This contains all extra things that sparse binomial methods require
 * (does not contain BinomialBase stuff).
 */
template 
struct SpElnetPointInternalBinomialBase
        : ElnetPointInternalStaticBase
{
private:
using base_t = ElnetPointInternalStaticBase;
using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase<
    ValueType, IndexType, BoolType>;

public:
using typename base_t::value_t;
using typename base_t::index_t;
using bool_t = BoolType;

// Wraps a caller-owned sparse X together with its column means (xb) and
// stddevs (xs); X stays unstandardized, with centering/scaling applied
// algebraically through xb_/xs_ in the methods below.
template 
SpElnetPointInternalBinomialBase(
        const XType& X,
        const XBType& xb,
        const XSType& xs
        )
    : X_(X.rows(), X.cols(), X.nonZeros(), X.outerIndexPtr(),
         X.innerIndexPtr(), X.valuePtr(), X.innerNonZeroPtr())
    , xb_(xb.data(), xb.size())
    , xs_(xs.data(), xs.size())
    , xm_(X.cols())
{}

protected:
GLMNETPP_STRONG_INLINE auto sum_weighted_resid() const { return svr_; }

template 
GLMNETPP_STRONG_INLINE
void update_active(index_t k, const NewWeightType& new_weight) {
    xm_(k) = X_.col(k).dot(new_weight);
}

GLMNETPP_STRONG_INLINE
void update_intercept(value_t diff, value_t new_weight_sum) {
    if (diff) svr_ -= diff * new_weight_sum;
}

// Residual update for the implicitly-centered sparse column k; also
// maintains the mean shift o_ and the weighted residual sum svr_.
template 
GLMNETPP_STRONG_INLINE
void update_resid(index_t k, RType& r, value_t beta_diff, const VType& v, value_t new_weight_sum) {
    auto d_scaled = beta_diff/ xs_(k);
    gaussian_naive_t::update_resid(r, d_scaled, X_.col(k).cwiseProduct(v));
    o_ += d_scaled * xb_(k);
    svr_ -= d_scaled*(xm_(k)-xb_(k)*new_weight_sum);
}

template 
void update_with_new_weights(
        index_t j,
        const VType& v,
        index_t opt_type,
        value_t new_weight_sum,
        value_t& xv_j)
{
    xm_(j) = X_.col(j).dot(v);
    if (!opt_type) {
        // Exact (non-upper-bound) weighted variance of standardized column j.
        xv_j = X_.col(j).cwiseProduct(X_.col(j)).dot(v);
        xv_j = (xv_j - 2.0*xb_(j)*xm_(j)+new_weight_sum*xb_(j)*xb_(j))/(xs_(j) * xs_(j));
    }
}

void update_shifts(value_t sum_weighted_resid) {
    svr_ = sum_weighted_resid;
    o_ = 0.0;
}

template 
void update_prediction(index_t l, value_t s, DiType& di, value_t& b0) {
    auto s_scaled = s/xs_(l);
    di -= s_scaled * X_.col(l);
    b0 += s_scaled * xb_(l);
}

template 
GLMNETPP_STRONG_INLINE
value_t compute_xv(index_t j, const WType& w) const {
    auto xj_sq = X_.col(j).cwiseProduct(X_.col(j));
    return (xj_sq.dot(w) - xb_(j) * xb_(j)) / (xs_(j) * xs_(j));
}

template 
GLMNETPP_STRONG_INLINE
value_t compute_grad(index_t k, const RType& r, const VType& v) const {
    auto gk = X_.col(k).dot((r.array() + v.array() * o_).matrix());
    return (gk - svr_ * xb_(k))/xs_(k);
}

private:
using sp_mat_t = Eigen::SparseMatrix;
using vec_t = Eigen::Matrix;

value_t o_ = 0.0;   // mean shift needed in gradient update
                    // accumulates the updates coming from the centering of the columns of x.
value_t svr_ = 0.0; // sum of weighted residual
Eigen::Map X_;      // sparse data matrix
Eigen::Map xb_;     // X column means
Eigen::Map xs_;     // X column stddevs
vec_t xm_;          // weighted means of columns of X.
// Note: original code includes xmz_ as the first element,
// but in our implementation, xmz_ is a separate variable in a base class.
}; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/poisson_naive.hpp0000644000176200001440000001352614171551160030200 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Dense elastic-net point solver for Poisson naive method. */ template struct ElnetPointInternal< util::glm_type::poisson, util::mode_type::naive, ValueType, IndexType, BoolType> : ElnetPointInternalPoissonBase { private: using base_t = ElnetPointInternalPoissonBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; using typename base_t::state_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template ElnetPointInternal( bool intr, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, value_t& dev0, const XType& X, const YType& y, const GType& g, const QType& q, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(intr, thr, maxit, nx, nlp, ia, X.rows(), X.cols(), dev0, y, g, q, vp, cl, ju, int_param) , X_(X.data(), X.rows(), X.cols()) , t_(X.rows()) , f_(X.rows()) { t_.array() = this->orig_weight().array() * this->y().array(); this->construct(t_.sum(), [&](bool offset_all_zero, bool intr) { if (!offset_all_zero) { if (intr) { this->null_deviance_intr() = t_.dot(this->offset()) - this->y_mean() * (1.0 - this->intercept()); } else { this->null_deviance_intr() = t_.dot(this->offset()) - this->new_weight_sum(); } } }, [&]() { this->resid() = t_ - this->weight(); }, [&](index_t i) { auto& dvr = this->null_deviance(); if (t_(i) > 0.0) dvr += t_(i) * std::log(this->y()(i)); }, [&](index_t j) { return compute_abs_grad(j); }); f_.array() = this->intercept() + this->offset().array(); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { auto gk = compute_grad(k); base_t::update_beta(k, gk, 
pack.l1_regul(), pack.l2_regul()); } GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) { gaussian_naive_t::update_resid( this->resid(), beta_diff, (this->weight().array() * X_.col(k).array()).matrix()); } GLMNETPP_STRONG_INLINE void update_intercept() { auto d = base_t::update_intercept( this->intercept(), this->convg_measure(), this->has_intercept(), this->resid().sum(), this->new_weight_sum()); if (d) { gaussian_naive_t::update_resid(this->resid(), d, this->weight()); } } template GLMNETPP_STRONG_INLINE void setup_wls(const PointPackType&) { auto& v = this->x_var(); const auto& w = this->weight(); base_t::setup_wls([&](auto k) { v(k) = compute_xv(X_.col(k), w); }); } template GLMNETPP_STRONG_INLINE state_t update_irls(const PointConfigPack& pack) { auto& w = this->weight(); const auto& q = this->orig_weight(); auto& r = this->resid(); f_.array() = this->intercept() + this->offset().array(); std::for_each(this->active_begin(), this->active_end(), [&](index_t k) { f_ += this->beta(k) * X_.col(k); }); w.array() = q.array() * (f_.array().abs().min(this->max_link())).binaryExpr(f_.array(), [](auto x, auto y) { return std::copysign(x,y); }).exp(); r=t_-w; return base_t::update_irls(pack.l1_regul(), [&](index_t k) { return compute_grad(k); }); } GLMNETPP_STRONG_INLINE value_t deviance() const { return (t_.dot(f_) - this->new_weight_sum() - this->null_deviance_intr()) / this->null_deviance(); } GLMNETPP_STRONG_INLINE const auto& prediction() const { return f_; } private: using typename base_t::mat_t; using typename base_t::vec_t; // TODO: put in base class? 
    // Weighted sum of squares of a column: sum_i w_i * x_i^2.
    template GLMNETPP_STRONG_INLINE
    static value_t compute_xv(const XType& x, const WType& w) {
        return w.dot(x.array().square().matrix());
    }

    // Gradient for feature j: r . x_j.
    GLMNETPP_STRONG_INLINE value_t compute_grad(index_t j) const {
        return base_t::compute_grad(this->resid(), X_.col(j));
    }

    GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t j) const {
        return std::abs(base_t::compute_grad(this->resid(), X_.col(j)));
    }

    Eigen::Map X_;  // data matrix (dense view; Map args stripped by extraction)
    vec_t t_;       // different quantity from sparse version
    vec_t f_;       // linear prediction
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/poisson_base.hpp0000644000176200001440000001726614171551160030015 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include

namespace glmnetpp {

/*
 * Base class for internal implementation of Poisson elastic-net point solver.
 * This contains all the common interface and members across all versions of poisson:
 * - naive
 * - sparse/dense
 */
template struct ElnetPointInternalPoissonBase
    : ElnetPointInternalNonLinearBase
{
private:
    using base_t = ElnetPointInternalNonLinearBase;
    using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase;

protected:
    using state_t = util::control_flow;
    using typename base_t::vec_t;
    using typename base_t::ivec_t;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::bool_t;

    // no = number of observations, ni = number of features.
    template ElnetPointInternalPoissonBase(
            bool intr, value_t thr, index_t maxit, index_t nx, index_t& nlp,
            IAType& ia, index_t no, index_t ni, value_t& dev0,
            const YType& y, const GType& g, const QType& q,
            const VPType& vp, const CLType& cl, const JUType& ju,
            const IntParamType& int_param)
        : base_t(thr, maxit, nx, nlp, intr, ia, dev0, vp, cl, ju)
        , a_(ni)
        , as_(ni)
        , r_(no)
        , v_(ni)
        , w_(no)
        // Cap on the linear predictor so exp() cannot overflow: log of a
        // tenth of the largest representable value.
        , fmax_(std::log(std::numeric_limits::max()*0.1))
        , q_(q.data(), q.size())
        , g_(g.data(), g.size())
        , y_(y.data(), y.size())
    {
        a_.setZero();
        as_.setZero();
    }

    GLMNETPP_STRONG_INLINE auto intercept() const { return az_; }
    GLMNETPP_STRONG_INLINE auto beta(index_t k) const {
        return a_(k);
    }

    // Fold the (variance-weighted) squared coefficient move into the
    // coordinate-descent convergence measure.
    GLMNETPP_STRONG_INLINE void update_dlx(index_t k, value_t beta_diff) {
        base_t::update_dlx(beta_diff, v_(k));
    }

    // Build/refresh the strong set for the new lambda (p.alm) given the
    // previous lambda (p.alm0) and the elastic-net mixing value p.beta.
    template GLMNETPP_STRONG_INLINE
    void initialize(const PackType& p) {
        base_t::compute_strong_map(
                this->abs_grad(), this->penalty(), this->strong_map(),
                p.beta, p.alm, p.alm0,
                // skip predicate: already in the strong set, or excluded
                // by the user's `ju` flags.
                [&](auto k) { return !this->is_excluded(k) || !this->exclusion()[k]; });
    }

protected:
    // Mutable/const accessors over the solver state, used by derived
    // (dense/sparse) Poisson solvers.
    GLMNETPP_STRONG_INLINE auto& beta(index_t k) { return a_(k); }
    GLMNETPP_STRONG_INLINE auto& resid() { return r_; }
    GLMNETPP_STRONG_INLINE const auto& resid() const { return r_; }
    GLMNETPP_STRONG_INLINE auto& x_var() { return v_; }
    GLMNETPP_STRONG_INLINE const auto& x_var() const { return v_; }
    GLMNETPP_STRONG_INLINE auto new_weight_sum() const { return v0_; }
    GLMNETPP_STRONG_INLINE auto& null_deviance_intr() { return dv0_; }
    GLMNETPP_STRONG_INLINE auto null_deviance_intr() const { return dv0_; }
    GLMNETPP_STRONG_INLINE auto& intercept() { return az_; }
    GLMNETPP_STRONG_INLINE auto& weight() { return w_; }
    GLMNETPP_STRONG_INLINE const auto& weight() const { return w_; }
    GLMNETPP_STRONG_INLINE const auto& y() const { return y_; }
    GLMNETPP_STRONG_INLINE const auto& y_mean() const { return yb_; }
    GLMNETPP_STRONG_INLINE const auto& orig_weight() const { return q_; }
    GLMNETPP_STRONG_INLINE const auto& offset() const { return g_; }
    GLMNETPP_STRONG_INLINE auto max_link() const { return fmax_; }

    // Soft-threshold update of coefficient k within its box constraints
    // cl(0,k) <= a_k <= cl(1,k).
    GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, value_t gk, value_t l1_regul, value_t l2_regul) {
        const auto& cl = this->endpts();
        base_t::update_beta(
                beta(k), gk, v_(k), this->penalty()(k),
                cl(0,k), cl(1,k), l1_regul, l2_regul);
    }

    // Snapshot the intercept and active coefficients (for the IRLS
    // convergence test), then refresh per-feature quantities via update_f
    // over all non-skipped features.
    template GLMNETPP_STRONG_INLINE
    void setup_wls(UpdateFType update_f) {
        az0_ = az_;
        std::for_each(this->active_begin(), this->active_end(),
            [&](auto k) { as_(k) = a_(k); });
        base_t::for_each_with_skip(this->all_begin(), this->all_end(),
            update_f,
            [&](auto k) { return this->is_excluded(k); });
    }

    // One-time setup shared by dense/sparse Poisson solvers: null model
    // (weights, intercept, null deviance), initial residual, convergence
    // threshold scaling, and initial absolute gradients. The *_f callbacks
    // are supplied by the derived class.
    template void construct(
            value_t y_mean,
            InitFType init_f,
            Init2FType init_2_f,
            UpdateDvrFType update_dvr_f,
            AbsGradFType abs_grad_f)
    {
        bool intr = this->has_intercept();
        auto& dev0 = this->null_deviance();
        yb_ = y_mean;
        if ((g_.array() == 0).all()) {
            // No offset: the null model has a closed form.
            if (intr) {
                w_ = yb_ * q_;
                az_ = std::log(yb_);
                dv0_ = yb_ * (az_-1.0);
                init_f(true, true);
            } else {
                w_ = q_;
                az_ = 0.;
                dv0_ = -1.0;
                init_f(true, false);
            }
        } else {
            // Offset present: weights start at q .* exp(clamped offset);
            // |g| is capped at fmax_ before exp to avoid overflow.
            w_.array() = q_.array() *
                (g_.array().abs().min(fmax_)).binaryExpr(g_.array(),
                    [&](auto x, auto y) { return std::copysign(x, y); }).exp();
            v0_ = w_.sum();
            if (intr) {
                auto eaz = yb_ / v0_;
                w_ *= eaz;
                az_ = std::log(eaz);
                init_f(false, true);
            } else {
                az_ = 0.0;
                init_f(false, false);
            }
        }
        v0_ = 1.0;
        if (intr) v0_ = yb_;
        init_2_f();
        dev0 = -yb_;
        for (int i = 0; i < y_.size(); ++i) { update_dvr_f(i); }
        dev0 -= dv0_;
        // Convergence threshold is scaled by the null deviance.
        this->set_thresh(this->thresh() * dev0);
        gaussian_naive_t::update_abs_grad(this->abs_grad(), abs_grad_f,
            [&](index_t j) { return !this->exclusion()[j]; });
    }

    // IRLS convergence + KKT check. Returns break_ when the weighted
    // intercept/coefficient movement is below threshold AND the KKT
    // conditions pass on the strong set; noop_ means keep iterating.
    template GLMNETPP_STRONG_INLINE
    state_t update_irls(
            value_t l1_regul, GradFType grad_f) {
        v0_ = w_.sum();
        value_t diff0 = az_ - az0_;
        bool ix = base_t::has_converged_irls(
                v0_ * diff0 * diff0,
                [&](index_t k) { auto d = a_(k)-as_(k); return v_(k)*d*d; });
        if (ix) {
            auto skip_f = [&](auto k) {
                return !this->is_excluded(k) || !this->exclusion()[k];
            };
            gaussian_naive_t::update_abs_grad(
                    this->abs_grad(),
                    [&](index_t k) { return std::abs(grad_f(k)); },
                    skip_f);
            bool kkt_passed = gaussian_naive_t::check_kkt(
                    this->abs_grad(), this->penalty(), this->strong_map(),
                    l1_regul, skip_f);
            if (kkt_passed) return state_t::break_;
        }
        return state_t::noop_;
    }

private:
    vec_t a_;               // coefficients
    vec_t as_;              // old coefficients
    vec_t r_;               // residual
    vec_t v_;               // weighted variance of columns of x
    vec_t w_;               // weight
    const value_t fmax_;    // max linear prediction
    value_t dv0_ = 0.0;     // null deviance
    value_t v0_ = 0.0;      // sum of weights
    value_t az_ = 0.0;      // intercept
    value_t az0_ = 0.0;     // prev intercept during WLS
    Eigen::Map q_;          // original weights
    Eigen::Map g_;          // offset
    Eigen::Map y_;          // y
                            // response
    value_t yb_ = 0.0;      // y mean
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/base.hpp0000644000176200001440000003430014171551160026227 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include #include #include #include #include

namespace glmnetpp {

/*
 * Base class for all static interface
 */
template struct ElnetPointInternalStaticBase
{
protected:
    using value_t = ValueType;
    using index_t = IndexType;

    /* Helper routines to encapsulate the logic of some common routines in a generic way */

    // Visit every element in [begin, end) except those for which
    // skip_pol(k) is true.
    template GLMNETPP_STRONG_INLINE
    static constexpr void for_each_with_skip(
            Iter begin, Iter end, UpdatePolicy update_pol, SkipPolicy skip_pol)
    {
        for (; begin != end; ++begin) {
            auto k = *begin;
            if (skip_pol(k)) continue;
            update_pol(k);
        }
    }

    // Convergence measure: running max of x_var * beta_diff^2.
    GLMNETPP_STRONG_INLINE
    static void update_dlx(value_t& dlx, value_t beta_diff, value_t x_var) {
        dlx = std::max(x_var * beta_diff * beta_diff, dlx);
    }

    // Elastic-net coordinate update: soft-threshold at penalty*l1_regul,
    // shrink by (x_var + penalty*l2_regul), clamp into [a_min, a_max].
    GLMNETPP_STRONG_INLINE
    static void update_beta(
            value_t& a, value_t gk, value_t x_var, value_t penalty,
            value_t a_min, value_t a_max,
            value_t l1_regul, value_t l2_regul)
    {
        auto a_copy = a;
        auto u = gk + a_copy * x_var;
        auto v = std::abs(u) - penalty * l1_regul;
        a = 0.0;
        if (v > 0.0) {
            a = std::max(a_min,
                    std::min(a_max, std::copysign(v,u) / (x_var + penalty * l2_regul)) );
        }
    }

    // Intercept step d = r_sum / var (when intr); returns the step so the
    // caller can shift residuals. Also folds d into the convergence measure.
    GLMNETPP_STRONG_INLINE
    static value_t update_intercept(
            value_t& intercept, value_t& dlx, bool intr,
            value_t r_sum, value_t var)
    {
        auto d = 0.0;
        if (intr) d = r_sum / var;
        if (d) {
            intercept += d;
            update_dlx(dlx, d, var);
        }
        return d;
    }

    // KKT violation test for one feature.
    GLMNETPP_STRONG_INLINE
    static bool check_kkt(value_t g, value_t l1_regul, value_t penalty) {
        return g > l1_regul * penalty;
    }

    /*
     * Computes strong map given a threshold.
     */
    template GLMNETPP_STRONG_INLINE
    static bool compute_strong_map(
            const GType& g,
            const VPType& penalty,
            SType& strong_map,
            value_t tlam,
            SkipType skip_f)
    {
        bool updated = false;
        for_each_with_skip(
                util::counting_iterator(0),
                util::counting_iterator(g.size()),
                [&](auto k) {
                    if (check_kkt(g(k), tlam, penalty(k))) {
                        strong_map[k] = true;
                        updated = true;
                    }
                },
                skip_f);
        return updated;
    }

    /*
     * Same as above, but allows users to perform any action upon kkt failure at index k.
     */
    template GLMNETPP_STRONG_INLINE
    static bool compute_strong_map(
            const GType& g,
            const VPType& penalty,
            SType& strong_map,
            value_t tlam,
            FType f,
            SkipType skip_f)
    {
        bool updated = false;
        for_each_with_skip(
                util::counting_iterator(0),
                util::counting_iterator(g.size()),
                [&](auto k) {
                    if (check_kkt(g(k), tlam, penalty(k))) {
                        strong_map[k] = true;
                        updated = true;
                        f(k);
                    }
                },
                skip_f);
        return updated;
    }

    /*
     * Computes strong map by computing the threshold based on the previous and current lambda.
     * (Sequential strong rule: tlam = beta * (2*lambda - lambda_prev).)
     */
    template GLMNETPP_STRONG_INLINE
    static bool compute_strong_map(
            const GType& g,
            const VPType& penalty,
            SType& strong_map,
            value_t beta,
            value_t lmda,
            value_t prev_lmda,
            SkipType skip_f)
    {
        auto tlam = beta * (2.0 * lmda - prev_lmda);
        return compute_strong_map(g, penalty, strong_map, tlam, skip_f);
    }

    /*
     * Same as above, but allows users to perform any action upon kkt failure at index k.
     */
    template GLMNETPP_STRONG_INLINE
    static bool compute_strong_map(
            const GType& g,
            const VPType& penalty,
            SType& strong_map,
            value_t beta,
            value_t lmda,
            value_t prev_lmda,
            FType f,
            SkipType skip_f)
    {
        auto tlam = beta * (2.0 * lmda - prev_lmda);
        return compute_strong_map(g, penalty, strong_map, tlam, f, skip_f);
    }

    // Inner product of residual with a feature column.
    template GLMNETPP_STRONG_INLINE
    static auto compute_grad(const RType& r, const XType& x) {
        return r.dot(x);
    }

public:
    GLMNETPP_STRONG_INLINE
    constexpr static bool equal(value_t x, value_t y) { return x == y; }

    // TODO: may be useful to have a static interface for multi-stuff
    template GLMNETPP_STRONG_INLINE
    constexpr static bool equal(const Eigen::MatrixBase& x,
                                const Eigen::MatrixBase& y)
    { return (x.array() == y.array()).all(); }
};

/*
 * Base class for internal implementation of any GLM elastic-net point solver.
 * This only views resources and doesn't allocate expensive data structures.
 */
template struct ElnetPointInternalBaseViewer
    : ElnetPointInternalStaticBase
{
private:
    using base_t = ElnetPointInternalStaticBase;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using bool_t = BoolType;

    // All arguments are stored as non-owning views/references.
    template ElnetPointInternalBaseViewer(
            value_t thr,
            index_t maxit,
            index_t& nin,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju )
        : thr_(thr)
        , maxit_(maxit)
        , nin_(nin)
        , nx_(nx)
        , mm_(nullptr, 0)   // rebound later via construct()
        , nlp_(nlp)
        , ia_(ia.data(), ia.size())
        , vp_(vp.data(), vp.size())
        , cl_(cl.data(), cl.rows(), cl.cols())
        , ju_(util::init_bvec::eval(ju))
    {}

    GLMNETPP_STRONG_INLINE void increment_passes() { ++nlp_; }
    GLMNETPP_STRONG_INLINE void coord_desc_reset() { dlx_ = 0.0; }

    // Iterators over all features / over the (1-indexed) active set.
    GLMNETPP_STRONG_INLINE auto all_begin() const {
        return util::counting_iterator(0);
    }
    GLMNETPP_STRONG_INLINE auto all_end() const {
        return util::counting_iterator(vp_.size());
    }
    GLMNETPP_STRONG_INLINE auto active_begin() const {
        return util::one_to_zero_iterator(ia_.data());
    }
    GLMNETPP_STRONG_INLINE auto active_end() const {
        return
        util::one_to_zero_iterator(ia_.data() + nin_);
    }

    // mm_(j) is non-zero iff feature j is active.
    GLMNETPP_STRONG_INLINE bool is_active(index_t j) const { return mm_(j); }
    GLMNETPP_STRONG_INLINE bool has_converged() const { return dlx_ < thr_; }
    GLMNETPP_STRONG_INLINE bool has_reached_max_passes() const { return nlp_ > maxit_; }
    GLMNETPP_STRONG_INLINE bool has_reached_max_active() const { return nin_ > nx_; }
    GLMNETPP_STRONG_INLINE auto n_active() const { return nin_; }

    // Register feature k as active; throws when the active-set capacity
    // (nx_) is exceeded. ia_ stores 1-indexed feature ids.
    GLMNETPP_STRONG_INLINE void update_active(index_t k) {
        ++nin_;
        if (has_reached_max_active()) {
            throw util::max_active_reached_error();
        }
        mm_(k) = nin_;
        ia_(nin_-1) = k+1;
    }

protected:
    using base_t::update_dlx;

    GLMNETPP_STRONG_INLINE auto& passes() { return nlp_; }
    GLMNETPP_STRONG_INLINE auto& convg_measure() { return dlx_; }
    GLMNETPP_STRONG_INLINE void set_thresh(value_t t) { thr_ = t; }
    GLMNETPP_STRONG_INLINE auto thresh() const { return thr_; }
    GLMNETPP_STRONG_INLINE const auto& endpts() const { return cl_; }
    GLMNETPP_STRONG_INLINE const auto& penalty() const { return vp_; }
    GLMNETPP_STRONG_INLINE const auto& exclusion() const { return ju_; }
    // 0-indexed position of feature k within the active set.
    GLMNETPP_STRONG_INLINE auto active_idx(index_t k) const { return mm_(k)-1; }

    /*
     * Derived class must call this to ensure the viewers are all initialized correctly.
     * (Placement-new rebinds the mm_ map onto storage owned by the derived class.)
     */
    template GLMNETPP_STRONG_INLINE
    void construct(MMType&& mm) {
        new (&mm_) Eigen::Map(mm.data(), mm.size());
    }

    GLMNETPP_STRONG_INLINE
    void update_dlx(value_t beta_diff, value_t x_var) {
        base_t::update_dlx(dlx_, beta_diff, x_var);
    }

    // TODO: this is only needed for multi-stuff
    template GLMNETPP_STRONG_INLINE
    void update_dlx(const Eigen::MatrixBase& beta_diff, value_t x_var) {
        base_t::update_dlx(dlx_, beta_diff.array().abs().maxCoeff(), x_var);
    }

    using vec_t = Eigen::Matrix;
    using mat_t = Eigen::Matrix;
    using sp_mat_t = Eigen::SparseMatrix;
    using ivec_t = Eigen::Matrix;
    using bvec_t = util::bvec_t;
    // Exclusion flags are either referenced directly or viewed as a map,
    // depending on the caller's container type.
    using ju_t = typename std::conditional<
        std::is_same::value,
        const bvec_t&,
        Eigen::Map >::type;

private:
    value_t dlx_ = 0.0;     // measures convergence of each coord-desc
    value_t thr_;           // threshold for convergence
    const index_t maxit_;   // max number of passes
    index_t& nin_;          // number of active variables
    const index_t nx_;      // max number of active variables
    Eigen::Map mm_;         // index k is j if feature k is the jth feature active
    index_t& nlp_;          // number of passes
    Eigen::Map ia_;         // active set (important that it's 1-indexed!)
    Eigen::Map vp_;         // penalties on features
    Eigen::Map cl_;         // limits on each feature (2 x nvars)
    ju_t ju_;               // exclusion type
};

// Owning variant of the viewer: allocates the active-index map mm_ and
// zero-initializes the active-set bookkeeping.
template struct ElnetPointInternalBase
    : ElnetPointInternalBaseViewer
{
private:
    using base_t = ElnetPointInternalBaseViewer;

protected:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::ivec_t;

public:
    template ElnetPointInternalBase(
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju )
        : base_t(thr, maxit, nin_, nx, nlp, ia, vp, cl, ju)
        , mm_(vp.size())
    {
        base_t::construct(mm_);
        ia.setZero();
        mm_.setZero();
    }

private:
    index_t nin_ = 0;
    ivec_t mm_;     // index k is j if feature k is the jth feature active
};

/*
 * Base class for all derived classes for non-linear (non-gaussian) point solver.
 * Adds the absolute-gradient cache, the strong set, the intercept flag and a
 * reference to the null deviance.
 */
template struct ElnetPointInternalNonLinearBase
    : ElnetPointInternalBase
{
private:
    using base_t = ElnetPointInternalBase;

protected:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::vec_t;

public:
    template ElnetPointInternalNonLinearBase(
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            bool intr,
            IAType& ia,
            value_t& dev0,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju )
        : base_t(thr, maxit, nx, nlp, ia, vp, cl, ju)
        , ga_(vp.size())
        , ixx_(vp.size(), false)
        , intr_(intr)
        , dev0_(dev0)
    {
        ga_.setZero();
    }

    // Excluded == not in the strong set.
    GLMNETPP_STRONG_INLINE bool is_excluded(index_t k) const { return !ixx_[k]; }
    GLMNETPP_STRONG_INLINE const auto& abs_grad() const { return ga_; }
    GLMNETPP_STRONG_INLINE auto null_deviance() const { return dev0_; }

    // Default KKT check is a no-op (always passes); specific solvers
    // override/hide this as needed.
    template GLMNETPP_STRONG_INLINE
    constexpr bool check_kkt(const PointPackType&) const { return true; }

protected:
    GLMNETPP_STRONG_INLINE auto& abs_grad() { return ga_; }
    GLMNETPP_STRONG_INLINE auto& strong_map() { return ixx_; }
    GLMNETPP_STRONG_INLINE const auto& strong_map() const { return ixx_; }
    GLMNETPP_STRONG_INLINE auto has_intercept() const { return intr_; }
    GLMNETPP_STRONG_INLINE auto& null_deviance() { return dev0_; }

    // IRLS converged when neither the intercept movement nor any active
    // coefficient movement (both squared, variance-weighted) exceeds the
    // threshold.
    template GLMNETPP_STRONG_INLINE
    bool has_converged_irls(
            value_t max_intr_diff_sq,
            MaxBetaDiffSqFType max_beta_diff_sq_f) const
    {
        bool ix = max_intr_diff_sq > this->thresh();
        if (!ix) {
            for (auto it = this->active_begin(); it != this->active_end(); ++it) {
                auto k = *it;
                ix = max_beta_diff_sq_f(k) > this->thresh();
                if (ix) break;
            }
        }
        return !ix;
    }

private:
    vec_t ga_;              // absolute gradient per feature
    std::vector ixx_;       // strong set membership flags
    const bool intr_;       // fit an intercept?
    value_t& dev0_;         // null deviance (external storage)
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_binomial_two_class.hpp0000644000176200001440000001206614171551160031674 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include #include #include #include #include #include #include #include

namespace glmnetpp {

/*
 * Sparse elastic-net point solver for Binomial multi-class
method.
 */
// NOTE(review): despite the file comment above, the specialization below is
// the TWO-class (binary logistic) sparse solver.
template struct SpElnetPointInternal<
        util::glm_type::binomial,
        util::mode_type::two_class,
        ValueType, IndexType, BoolType>
    : ElnetPointInternalBinomialTwoClassBase
    , SpElnetPointInternalBinomialBase
{
private:
    using base_t = ElnetPointInternalBinomialTwoClassBase;
    using sp_base_t = SpElnetPointInternalBinomialBase;
    using typename base_t::state_t;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::bool_t;

    // xb/xs are the column centers/scales used by the sparse base class to
    // work on the uncentered sparse matrix.
    template SpElnetPointInternal(
            bool isd, bool intr, index_t kopt,
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            const GType& g,
            value_t& dev0,
            const XType& X,
            const YType& y,
            const WType& w,
            const XBType& xb,
            const XSType& xs,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju,
            const IntParamType& int_param)
        : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, g, dev0,
                 y, w, vp, cl, ju, int_param)
        , sp_base_t(X, xb, xs)
        , sc_(X.rows())
    {
        this->construct(
            [&](index_t j) { return sp_base_t::compute_xv(j, this->weight()); },
            [&](index_t j) { return sp_base_t::compute_grad(j, this->resid(), this->new_weight()); });
        sp_base_t::update_shifts(this->resid().sum());
    }

    using base_t::check_kkt;
    using base_t::update_dlx;
    using base_t::for_each_with_skip;

    // Coordinate update of beta_k from the sparse gradient.
    template GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, const PointPackType& pack) {
        auto gk = sp_base_t::compute_grad(k, this->resid(), this->new_weight());
        base_t::update_beta(k, gk, pack.l1_regul(), pack.l2_regul());
    }

    // Keep dense bookkeeping and sparse centering corrections in sync.
    GLMNETPP_STRONG_INLINE void update_active(index_t k) {
        base_t::update_active(k);
        sp_base_t::update_active(k, this->new_weight());
    }

    GLMNETPP_STRONG_INLINE void update_intercept() {
        auto d = base_t::update_intercept(this->sum_weighted_resid());
        sp_base_t::update_intercept(d, this->new_weight_sum());
    }

    GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) {
        sp_base_t::update_resid(
                k, this->resid(), beta_diff,
                this->new_weight(), this->new_weight_sum());
    }

    // Refresh per-feature variances for the new IRLS weights over the
    // strong set.
    template GLMNETPP_STRONG_INLINE
    void setup_wls(const
PointPackType&) {
        const auto& ixx = this->strong_map();
        const auto& v = this->new_weight();
        auto& xv = this->x_var();
        const auto& xmz = this->new_weight_sum();
        base_t::setup_wls();
        base_t::for_each_with_skip(this->all_begin(), this->all_end(),
            [&](index_t j) {
                sp_base_t::update_with_new_weights(j, v, this->optimization_type(), xmz, xv(j));
            },
            [&](index_t j) { return !ixx[j]; });
    }

    // One IRLS step: rebuild the linear predictor sc_ from the active set
    // (sparse, with the centering correction accumulated in b0), let the
    // base class update probabilities/weights, then refresh gradients and
    // re-check the strong set.
    template GLMNETPP_STRONG_INLINE
    state_t update_irls(const PointConfigPack& pack) {
        sc_.array() = this->intercept();
        auto b0=0.0;
        std::for_each(this->active_begin(), this->active_end(),
            [&](index_t l) {
                sp_base_t::update_prediction(l, -this->beta(l), sc_, b0);
            });
        sc_.array() += b0;
        auto predict_f = [&](index_t i) { return sc_(i) + this->offset()(i); };
        state_t state = base_t::update_irls_invariants(predict_f);
        if (state == state_t::break_) return state_t::break_;
        // update further invariants
        sp_base_t::update_shifts(this->resid().sum());
        auto grad_f = [&](index_t k) {
            return sp_base_t::compute_grad(k, this->resid(), this->new_weight());
        };
        return base_t::update_irls_strong_set(grad_f, pack.l1_regul());
    }

private:
    using typename base_t::vec_t;
    vec_t sc_;  // buffer for temporary storage for IRLS
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/binomial_multi_class_group.hpp0000644000176200001440000001074414171551160032730 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include #include #include #include #include #include #include #include

namespace glmnetpp {

/*
 * Dense elastic-net point solver for Binomial multi-class group method.
 */
template struct ElnetPointInternal<
        util::glm_type::binomial,
        util::mode_type::multi_class_group,
        ValueType, IndexType, BoolType>
    : ElnetPointInternalBinomialMultiClassGroupBase
{
private:
    using base_t = ElnetPointInternalBinomialMultiClassGroupBase;
    using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase;
    using typename base_t::state_t;
    using typename base_t::mat_t;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::bool_t;

    // Grouped multinomial: each feature carries one coefficient per class,
    // penalized jointly (group lasso across classes).
    template ElnetPointInternal(
            bool intr,
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            GType& g,
            value_t& dev0,
            const XType& X,
            const YType& y,
            const WType& w,
            const XVType& xv,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju,
            const IntParamType& int_param)
        : base_t(intr, thr, maxit, nx, nlp, ia, g, dev0,
                 y, w, xv, vp, cl, ju, int_param)
        , X_(X.data(), X.rows(), X.cols())
    {
        base_t::construct(
            [&](index_t ic) { base_t::initialize_resid(ic); },
            [&](index_t j, auto& grad_buff) { return compute_abs_grad(j, grad_buff); });
    }

    // Shift every class-column of the residual by its coefficient delta.
    template GLMNETPP_STRONG_INLINE
    void update_resid(index_t k, const DiffType& beta_diff) {
        auto& r = this->resid();
        for (int ic = 0; ic < r.cols(); ++ic) {
            gaussian_naive_t::update_resid(
                    r.col(ic), beta_diff(ic),
                    X_.col(k).cwiseProduct(this->weight()));
        }
    }

    // Grouped coefficient update; the gradient is a per-class vector.
    template GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, const PointPackType&) {
        base_t::update_beta(k,
                [&](index_t k, auto& buff) { compute_grad(k, buff); });
    }

    // Prepare WLS: reset per-class residuals, then take an intercept step
    // for each class.
    template GLMNETPP_STRONG_INLINE
    void setup_wls(const PointPackType& pack) {
        base_t::setup_wls(pack.l1_regul(), pack.l2_regul(),
            [&](index_t ic, value_t t) { base_t::initialize_resid(ic, t); });
        auto& r = this->resid();
        for (int ic = 0; ic < r.cols(); ++ic) {
            gaussian_naive_t::update_intercept(
                    this->intercept()(ic), r.col(ic),
                    this->convg_measure(),
                    this->has_intercept(),
                    r.col(ic).sum(), 1.,
                    this->weight());
        }
    }

    // One IRLS step: base class drives; callbacks rebuild each class's
    // linear predictor from the active set, reset residuals, and supply
    // absolute (group-norm) gradients.
    template GLMNETPP_STRONG_INLINE
    state_t update_irls(const PointConfigPack& pack) {
        return base_t::update_irls(pack.l1_regul(),
            [&](index_t ic, auto& sc) {
                std::for_each(this->active_begin(), this->active_end(),
                    [&](index_t k) { sc += this->beta(k)(ic) * X_.col(k); });
            },
            [&](index_t k) { base_t::initialize_resid(k); },
            [&](index_t k, auto& grad_buff) { return compute_abs_grad(k, grad_buff); });
    }

private:
    // Per-class gradient vector for feature k: R^T x_k.
    template GLMNETPP_STRONG_INLINE
    void compute_grad(index_t k, DestType&& dest) const {
        dest.noalias() = this->resid().transpose() * X_.col(k);
    }

    // Group (Euclidean) norm of the per-class gradient.
    template GLMNETPP_STRONG_INLINE
    value_t compute_abs_grad(index_t k, DestType&& dest) const {
        compute_grad(k, dest);
        return dest.norm();
    }

    Eigen::Map X_;
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_binomial_multi_class.hpp0000644000176200001440000001332314171551160032212 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include #include #include #include #include #include #include #include

namespace glmnetpp {

/*
 * Sparse elastic-net point solver for Binomial multi-class method.
 */
template struct SpElnetPointInternal<
        util::glm_type::binomial,
        util::mode_type::multi_class,
        ValueType, IndexType, BoolType>
    : ElnetPointInternalBinomialMultiClassBase
    , SpElnetPointInternalBinomialBase
{
private:
    using base_t = ElnetPointInternalBinomialMultiClassBase;
    using sp_base_t = SpElnetPointInternalBinomialBase;
    using typename base_t::state_t;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::bool_t;

    // Sparse multinomial (one-class-at-a-time) solver; xb/xs are column
    // centers/scales used by the sparse base to correct for centering.
    template SpElnetPointInternal(
            bool isd, bool intr, index_t kopt,
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            GType& g,
            value_t& dev0,
            const XType& X,
            const YType& y,
            const WType& w,
            const XBType& xb,
            const XSType& xs,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju,
            ISType& is,
            const IntParamType& int_param)
        : base_t(isd, intr, kopt, thr, maxit, nx, nlp, ia, g, dev0,
                 y, w, vp, cl, ju, is, int_param)
        , sp_base_t(X, xb, xs)
    {
        base_t::construct(
            [&](index_t j) { return sp_base_t::compute_xv(j, this->weight()); },
            [&](index_t ic) {
                initialize_resid(ic); },
            [&](index_t j) { return sp_base_t::compute_grad(j, this->resid(), this->new_weight()); });
    }

    using base_t::check_kkt;
    using base_t::update_dlx;
    using base_t::for_each_with_skip;

    // Keep dense bookkeeping and sparse centering corrections in sync.
    GLMNETPP_STRONG_INLINE void update_active(index_t k) {
        base_t::update_active(k);
        sp_base_t::update_active(k, this->new_weight());
    }

    GLMNETPP_STRONG_INLINE void update_intercept() {
        auto d = base_t::update_intercept(this->sum_weighted_resid());
        sp_base_t::update_intercept(d, this->new_weight_sum());
    }

    GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) {
        sp_base_t::update_resid(
                k, this->resid(), beta_diff,
                this->new_weight(), this->new_weight_sum());
    }

    // Coordinate update of beta_k for the current class.
    template GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, const PointPackType& pack) {
        base_t::update_beta(k,
                sp_base_t::compute_grad(k, this->resid(), this->new_weight()),
                pack.l1_regul(), pack.l2_regul());
    }

    // Per-class WLS setup: refresh weighted column variances for class ic
    // and the sparse residual-sum shifts. A non-noop_ state from the base
    // short-circuits (e.g. class skipped).
    GLMNETPP_STRONG_INLINE state_t setup_wls(index_t ic) {
        state_t state = base_t::setup_wls(ic);
        if (state != state_t::noop_) return state;
        const auto& v = this->new_weight();
        auto xmz = this->new_weight_sum();
        auto& xv_ic = this->curr_x_var();
        base_t::for_each_with_skip(this->all_begin(), this->all_end(),
            [&](index_t j) {
                sp_base_t::update_with_new_weights(j, v, this->optimization_type(), xmz, xv_ic[j]);
            },
            [&](index_t j) { return this->is_excluded(j); });
        sp_base_t::update_shifts(this->resid().sum());
        return state_t::noop_;
    }

    // One IRLS step over all classes; callbacks supply the sparse
    // prediction update (with centering correction b0), the softmax-style
    // exponentiation, the per-class residual reset, and the gradient.
    template GLMNETPP_STRONG_INLINE
    state_t update_irls(const PointConfigPack& pack) {
        value_t b0 = 0.0;
        return base_t::update_irls(
            pack.elastic_prop(), pack.l1_regul(),
            [this, &b0](index_t l, value_t s, auto& buff) {
                this->sp_base_t::update_prediction(l, s, buff, b0);
            },
            [&b0](auto& buff) { buff.array() = (buff.array() + b0).exp(); },
            [&](index_t ic) { initialize_resid(ic); },
            [this](index_t k) {
                return sp_base_t::compute_grad(k, this->resid(), this->new_weight());
            });
    }

    // Per-class convergence bookkeeping; rebuilds the class's linear
    // predictor from the active set (sparse, correction in b0).
    GLMNETPP_STRONG_INLINE void update_irls_class() {
        base_t::has_converged_irls_class();
        base_t::update_irls_class(
            [&](auto& buff) {
                value_t b0 = 0.0;
                std::for_each(this->active_begin(), this->active_end(),
                    [&](index_t k) {
                        sp_base_t::update_prediction(k, -this->beta(k), buff, b0);
                    });
                buff.array() += b0;
            });
    }

private:
    // Residual/weight initialization for class ic:
    //   p = q_ic / sxp (class probability), r from (y_ic, p),
    //   v = w .* p .* (1-p) (IRLS weights).
    GLMNETPP_STRONG_INLINE void initialize_resid(index_t ic) {
        auto& y = this->y();
        auto& v = this->new_weight();
        auto& r = this->resid();
        auto& q = this->q();
        auto& sxp = this->sxp();
        v.array() = q.col(ic).array()/sxp.array();
        base_t::initialize_resid(r, y.col(ic), v);
        v.array() = this->weight().array() * v.array() * (1.0 - v.array());
        sp_base_t::update_shifts(r.sum());
    }
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_wls.hpp0000644000176200001440000001077614171551160030525 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include

namespace glmnetpp {

// Sparse elastic-net WLS point solver (gaussian, wls mode), used as the
// inner solver by the IRLS-based GLM paths.
template struct SpElnetPointInternal<
        util::glm_type::gaussian,
        util::mode_type::wls,
        ValueType, IndexType, BoolType>
    : ElnetPointInternalGaussianWLSBase<
        ValueType, IndexType, BoolType>
{
private:
    using base_t = ElnetPointInternalGaussianWLSBase<
        ValueType, IndexType, BoolType>;
    using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase<
        ValueType, IndexType, BoolType>;

protected:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::sp_mat_t;
    using typename base_t::mat_t;
    using typename base_t::vec_t;

public:
    // Views the (uncentered) sparse matrix; xm/xs carry the column
    // means/scales so all computations can correct for centering on the fly.
    template SpElnetPointInternal(
            value_t alm0,
            value_t almc,
            value_t alpha,
            const XType& X,
            RType& r,
            const XMType& xm,
            const XSType& xs,
            XVType& xv,
            const VType& v,
            bool intr,
            const JUType& ju,
            const VPType& vp,
            const CLType& cl,
            index_t nx,
            value_t thr,
            index_t maxit,
            AType& a,
            value_t& aint,
            GType& g,
            IAType& ia,
            IYType& iy,
            index_t& iz,
            MMType& mm,
            index_t& nino,
            value_t& rsqc,
            index_t& nlp)
        : base_t(alm0, almc, alpha, r, xv, v, intr, ju, vp, cl,
                 nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp)
        , X_(X.rows(), X.cols(), X.nonZeros(),
             X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(),
             X.innerNonZeroPtr())
        , xm_(xm.data(), xm.size())
        , xs_(xs.data(), xs.size())
    {
        svr_ = this->resid().sum();
        base_t::construct(
            [&](index_t j) { return compute_xv(j); },
            // NOTE(review): abs of an already-absolute value — redundant
            // but harmless.
            [&](index_t j) { return std::abs(compute_abs_grad(j)); });
    }

    // Refresh per-feature weighted variances for a new set of weights.
    template GLMNETPP_STRONG_INLINE
    void initialize(const PointPackType&) {
        base_t::initialize([&](index_t j) { return compute_xv(j); });
    }

    template GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, const PointPackType&) {
        base_t::update_beta(k, compute_grad(k));
    }

    GLMNETPP_STRONG_INLINE void update_rsq(index_t k, value_t diff) {
        base_t::update_rsq(k, diff);
    }

    // Residual update on the uncentered sparse column: subtract the scaled
    // column contribution, then add back the mean correction; keep the
    // residual sum svr_ in sync.
    GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) {
        auto d_scaled = beta_diff / xs_(k);
        gaussian_naive_t::update_resid(this->resid(), d_scaled,
                X_.col(k).cwiseProduct(this->weight()));
        gaussian_naive_t::update_resid(this->resid(), -d_scaled * xm_(k),
                this->weight());
        svr_ = this->resid().sum();
    }

    GLMNETPP_STRONG_INLINE void update_intercept() {
        auto d = base_t::update_intercept(svr_);
        if (d) svr_ = this->resid().sum();
    }

    template GLMNETPP_STRONG_INLINE
    bool check_kkt(const PointPackType&) {
        return base_t::check_kkt(
                [&](index_t k) { return compute_xv(k); },
                [&](index_t k) { return compute_abs_grad(k); });
    }

private:
    // Weighted variance of standardized column k, computed on the
    // uncentered sparse data: (x'Wx - 2*m*x'W1*... + sum(W)*m^2) / s^2.
    GLMNETPP_STRONG_INLINE value_t compute_xv(index_t k) const {
        const auto& v = this->weight();
        value_t xv = X_.col(k).cwiseProduct(X_.col(k)).dot(v);
        xv -= 2.
            * xm_(k) * X_.col(k).dot(v);
        return (xv + this->new_weight_sum() * xm_(k) * xm_(k)) / (xs_(k) * xs_(k));
    }

    // Gradient on the standardized column, using the residual sum svr_
    // for the centering correction.
    GLMNETPP_STRONG_INLINE value_t compute_grad(index_t k) const {
        value_t d = X_.col(k).dot(this->resid());
        return (d - svr_ * xm_(k)) / xs_(k);
    }

    GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t k) const {
        return std::abs(compute_grad(k));
    }

    value_t svr_ = 0.0;     // sum of (weighted) residuals
    Eigen::Map X_;          // data matrix
    Eigen::Map xm_;         // column means of X
    Eigen::Map xs_;         // column stddevs of X
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_naive.hpp0000644000176200001440000000653414171551160031023 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include

namespace glmnetpp {

// Sparse elastic-net point solver for the gaussian naive method: works on
// the uncentered sparse matrix, tracking a scalar mean-shift correction o_
// instead of ever densifying the centered data.
template struct SpElnetPointInternal<
        util::glm_type::gaussian,
        util::mode_type::naive,
        ValueType, IndexType, BoolType>
    : ElnetPointInternalGaussianNaiveBase<
        ValueType, IndexType, BoolType>
{
private:
    using base_t = ElnetPointInternalGaussianNaiveBase<
        ValueType, IndexType, BoolType>;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;

    template SpElnetPointInternal(
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            YType& y,
            const WType& w,
            const XType& X,
            const XMType& xm,
            const XSType& xs,
            const XVType& xv,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju)
        : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju)
        , X_(X.rows(), X.cols(), X.nonZeros(),
             X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(),
             X.innerNonZeroPtr())
        , y_(y.data(), y.size())
        , w_(w.data(), w.size())
        , xm_(xm.data(), xm.size())
        , xs_(xs.data(), xs.size())
    {
        base_t::construct([this](index_t k) { return compute_abs_grad(k); });
    }

    template GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, const PointPackType& pack) {
        base_t::update_beta(k, pack.ab, pack.dem, compute_grad(k));
    }

    // Residual update: subtract the scaled sparse column from y_ and fold
    // the mean correction into the scalar shift o_.
    GLMNETPP_STRONG_INLINE
    void update_resid(index_t k, value_t beta_diff) {
        auto beta_diff_scaled = beta_diff / xs_(k);
        y_ -= beta_diff_scaled *
            X_.col(k);
        o_ += beta_diff_scaled * xm_(k);
    }

    template GLMNETPP_STRONG_INLINE
    bool check_kkt(const PointPackType& pack) {
        return base_t::check_kkt(pack.ab,
                [this](index_t k) { return compute_abs_grad(k); });
    }

private:
    // Gradient on the standardized column: the (y + o) term applies the
    // accumulated mean-shift correction without densifying X.
    GLMNETPP_STRONG_INLINE value_t compute_grad(index_t k) const {
        return X_.col(k).cwiseProduct(w_).dot( (y_.array() + o_).matrix() ) / xs_(k);
    }

    GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t k) const {
        return std::abs(compute_grad(k));
    }

    using typename base_t::vec_t;
    using typename base_t::mat_t;
    using spmat_t = Eigen::SparseMatrix;

    value_t o_ = 0.0;       // mean shift correction when updating gradient
    Eigen::Map X_;          // data matrix (sparse)
    Eigen::Map y_;          // unscaled residual vector
    Eigen::Map w_;          // weights for each column of X_
    Eigen::Map xm_;         // col-wise mean
    Eigen::Map xs_;         // col-wise stddev
                            // (may not be actually the stddev of X, but something passed by user)
};

} // namespace glmnetpp

glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_binomial_multi_class_group.hpp0000644000176200001440000001343314171551160033430 0ustar liggesusers#pragma once

// NOTE(review): #include targets and template parameter lists were
// stripped by the text extraction; bare tokens preserved verbatim.
#include #include #include #include #include #include #include #include #include #include

namespace glmnetpp {

/*
 * Sparse elastic-net point solver for Binomial multi-class grouped method.
 */
template struct SpElnetPointInternal<
        util::glm_type::binomial,
        util::mode_type::multi_class_group,
        ValueType, IndexType, BoolType>
    : ElnetPointInternalBinomialMultiClassGroupBase
{
private:
    using base_t = ElnetPointInternalBinomialMultiClassGroupBase;
    using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase;
    using typename base_t::state_t;
    using typename base_t::sp_mat_t;
    using typename base_t::vec_t;

public:
    using typename base_t::value_t;
    using typename base_t::index_t;
    using typename base_t::bool_t;

    // Sparse grouped multinomial solver; xb/xs are column centers/scales
    // used to correct for centering on the uncentered sparse matrix.
    template SpElnetPointInternal(
            bool intr,
            value_t thr,
            index_t maxit,
            index_t nx,
            index_t& nlp,
            IAType& ia,
            GType& g,
            value_t& dev0,
            const XType& X,
            const YType& y,
            const WType& w,
            const XBType& xb,
            const XSType& xs,
            const XVType& xv,
            const VPType& vp,
            const CLType& cl,
            const JUType& ju,
            const IntParamType& int_param)
        : base_t(intr, thr, maxit, nx, nlp, ia, g, dev0,
                 y, w, xv, vp, cl, ju, int_param)
        , X_(X.rows(), X.cols(), X.nonZeros(),
             X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(),
             X.innerNonZeroPtr())
        , xb_(xb.data(), xb.size())
        , xs_(xs.data(), xs.size())
        , svr_(y.cols())
    {
        base_t::construct(
            [&](index_t ic) { initialize_resid(ic); },
            [&](index_t j, auto& grad_buff) { return compute_abs_grad(j, grad_buff); });
    }

    // Shift every class-column of the residual, with the sparse centering
    // correction (-d_scaled * xb_(k)) applied per class.
    template GLMNETPP_STRONG_INLINE
    void update_resid(index_t k, const DiffType& beta_diff) {
        auto& r = this->resid();
        for (int ic = 0; ic < r.cols(); ++ic) {
            auto d_scaled = beta_diff(ic) / xs_(k);
            gaussian_naive_t::update_resid(
                    r.col(ic), d_scaled,
                    X_.col(k).cwiseProduct(this->weight()));
            gaussian_naive_t::update_resid(
                    r.col(ic), -d_scaled * xb_(k),
                    this->weight());
        }
    }

    // Grouped coefficient update; the gradient is a per-class vector.
    template GLMNETPP_STRONG_INLINE
    void update_beta(index_t k, const PointPackType&) {
        base_t::update_beta(k,
                [&](index_t k, auto& buff) { compute_grad(k, buff); });
    }

    // NOTE(review): this member is truncated by the chunk boundary — the
    // remainder of setup_wls (and the rest of the class) lies outside the
    // visible source.
    template GLMNETPP_STRONG_INLINE
    void setup_wls(const PointPackType& pack) {
        base_t::setup_wls(pack.l1_regul(), pack.l2_regul(),
            [&](index_t ic, value_t t) { initialize_resid(ic, t); });
        auto& r = this->resid();
        for
(int ic = 0; ic < r.cols(); ++ic) { gaussian_naive_t::update_intercept( this->intercept()(ic), r.col(ic), this->convg_measure(), this->has_intercept(), svr_(ic), 1., this->weight()); if (this->has_intercept()) svr_(ic) = 0.0; } } template GLMNETPP_STRONG_INLINE state_t update_irls(const PointConfigPack& pack) { return base_t::update_irls( pack.l1_regul(), [&](index_t ic, auto& sc) { value_t b0 = 0.0; std::for_each(this->active_begin(), this->active_end(), [&](index_t k) { auto d_scaled = this->beta(k)(ic) / xs_(k); sc += d_scaled * X_.col(k); b0 -= d_scaled * xb_(k); }); sc.array() += b0; }, [&](index_t k) { initialize_resid(k); }, [&](index_t k, auto& grad_buff) { return compute_abs_grad(k, grad_buff); }); } private: GLMNETPP_STRONG_INLINE void initialize_resid(index_t ic) { base_t::initialize_resid(ic); svr_(ic) = this->resid().col(ic).sum(); } GLMNETPP_STRONG_INLINE void initialize_resid(index_t ic, value_t t) { base_t::initialize_resid(ic, t); svr_(ic) = this->resid().col(ic).sum(); } template GLMNETPP_STRONG_INLINE void compute_grad(index_t j, DestType&& dest) const { dest.noalias() = this->resid().transpose() * X_.col(j); dest = (dest - svr_*xb_(j)) / xs_(j); } template GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t j, DestType&& dest) const { compute_grad(j, dest); return dest.norm(); } Eigen::Map X_; // X matrix Eigen::Map xb_; // X column means Eigen::Map xs_; // X column stddevs vec_t svr_; // sum of (weighted) residual }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/decl.hpp0000644000176200001440000000244714171551160026233 0ustar liggesusers#pragma once #include namespace glmnetpp { template struct ElnetPointInternalBase; template struct ElnetPointInternalBinomialBase; template struct ElnetPointInternalBinomialTwoClassBase; template struct ElnetPointInternalGaussianBase; template struct ElnetPointInternalGaussianCovBase; template struct ElnetPointInternalGaussianNaiveBase; template struct 
ElnetPointInternalGaussianMultiBase; template mode , class ValueType = double , class IndexType = int , class BoolType = bool> struct ElnetPointInternal; template mode , class ValueType = double , class IndexType = int , class BoolType = bool> struct SpElnetPointInternal; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_cov.hpp0000644000176200001440000000334114157246224030005 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Dense elastic-net point solver for Gaussian covariance method. */ template struct ElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternal(value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, const XType& X, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, g, xv, vp, cl, ju) , X_(X.data(), X.rows(), X.cols()) {} GLMNETPP_STRONG_INLINE void update_active(index_t k) { base_t::update_active(k, [&](index_t j, index_t l) { return X_.col(j).dot(X_.col(l)); }); } private: using typename base_t::mat_t; Eigen::Map X_; // data matrix }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_poisson_naive.hpp0000644000176200001440000001704114171551160030676 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Sparse elastic-net point solver for Poisson naive method. 
*/ template struct SpElnetPointInternal< util::glm_type::poisson, util::mode_type::naive, ValueType, IndexType, BoolType> : ElnetPointInternalPoissonBase { private: using base_t = ElnetPointInternalPoissonBase; using gaussian_naive_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; using typename base_t::state_t; public: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::bool_t; template SpElnetPointInternal( bool intr, value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, value_t& dev0, const XType& X, const YType& y, const GType& g, const QType& q, const XBType& xb, const XSType& xs, const VPType& vp, const CLType& cl, const JUType& ju, const IntParamType& int_param) : base_t(intr, thr, maxit, nx, nlp, ia, X.rows(), X.cols(), dev0, y, g, q, vp, cl, ju, int_param) , X_(X.rows(), X.cols(), X.nonZeros(), X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(), X.innerNonZeroPtr()) , xb_(xb.data(), xb.size()) , xs_(xs.data(), xs.size()) , t_(X.rows()) , xm_(X.cols()) , qy_(X.rows()) { t_ = this->offset(); qy_.array() = this->orig_weight().array() * this->y().array(); this->construct(qy_.sum(), [&](bool offset_all_zero, bool intr) { if (offset_all_zero) { if (intr) { uu_ = this->intercept(); xm_ = this->y_mean() * xb_; } else { xm_.setZero(); uu_ = 0; } } else { if (intr) { uu_ = this->intercept(); this->null_deviance_intr() = qy_.dot(this->offset())-this->y_mean()*(1.0-this->intercept()); } else { uu_ = 0.0; this->null_deviance_intr() = qy_.dot(this->offset()) - this->new_weight_sum(); } base_t::for_each_with_skip(this->all_begin(), this->all_end(), [&](auto k) { xm_(k) = X_.col(k).dot(this->weight()); }, [&](auto k) { return !this->exclusion()[k]; }); } }, [&]() { auto yb = this->y_mean(); tt_ = yb - this->new_weight_sum() * (1.0-uu_); this->resid() = qy_ - this->weight() * (1.0-uu_); }, [&](index_t i) { auto& dvr = this->null_deviance(); if (qy_(i) > 0.0) dvr += qy_(i)*std::log(this->y()(i)); }, 
[&](index_t j) { return compute_abs_grad(j); }); } template GLMNETPP_STRONG_INLINE void update_beta(index_t k, const PointPackType& pack) { auto gk = compute_grad(k); base_t::update_beta(k, gk, pack.l1_regul(), pack.l2_regul()); } GLMNETPP_STRONG_INLINE void update_intercept() { auto d = base_t::update_intercept( this->intercept(), this->convg_measure(), this->has_intercept(), tt_ - uu_ * this->new_weight_sum(), this->new_weight_sum()); uu_ += d; } GLMNETPP_STRONG_INLINE void update_resid(index_t k, value_t beta_diff) { auto d_scaled = beta_diff / xs_(k); gaussian_naive_t::update_resid( this->resid(), d_scaled, X_.col(k).cwiseProduct(this->weight())); uu_ -= d_scaled * xb_(k); tt_ -= d_scaled * xm_(k); } template GLMNETPP_STRONG_INLINE void setup_wls(const PointPackType&) { auto& v = this->x_var(); const auto& w = this->weight(); auto v0 = this->new_weight_sum(); base_t::setup_wls( [&](auto k) { xm_(k) = X_.col(k).dot(w); v(k) = compute_xv(X_.col(k), w, v0, xb_(k), xm_(k), xs_(k)); }); } template GLMNETPP_STRONG_INLINE state_t update_irls(const PointConfigPack& pack) { auto& w = this->weight(); const auto& q = this->orig_weight(); auto& r = this->resid(); t_ = this->offset(); std::for_each(this->active_begin(), this->active_end(), [&](index_t k) { t_ += (this->beta(k) / xs_(k)) * X_.col(k); }); w.array() = q.array() * ((t_.array()+uu_).abs().min(this->max_link())).matrix().binaryExpr(t_, [&](auto x, auto y) { return std::copysign(x,y+uu_); }).array().exp(); r = qy_ - w*(1.0-uu_); tt_ = r.sum(); return base_t::update_irls(pack.l1_regul(), [&](index_t k) { xm_(k) = X_.col(k).dot(this->weight()); return compute_grad(k); }); } GLMNETPP_STRONG_INLINE value_t deviance() const { return (qy_.dot(t_) + this->y_mean()*uu_ -this->new_weight_sum()-this->null_deviance_intr())/this->null_deviance(); } GLMNETPP_STRONG_INLINE auto prediction() const { return (t_.array() + uu_).matrix(); } private: using typename base_t::vec_t; using typename base_t::sp_mat_t; template 
GLMNETPP_STRONG_INLINE static value_t compute_xv(const XType& x, const WType& w, value_t v0, value_t xb, value_t xm, value_t xs) { return (x.cwiseProduct(x).dot(w) + (-2.0*xm + v0*xb)*xb ) / (xs*xs); } GLMNETPP_STRONG_INLINE value_t compute_grad(index_t j) const { const auto& r = this->resid(); auto v0 = this->new_weight_sum(); return (X_.col(j).dot(r) - uu_*(xm_(j)-v0*xb_(j)) - xb_(j)*tt_) / xs_(j); } GLMNETPP_STRONG_INLINE value_t compute_abs_grad(index_t j) const { return std::abs(compute_grad(j)); } value_t tt_ = 0.0; value_t uu_ = 0.0; Eigen::Map X_; Eigen::Map xb_; Eigen::Map xs_; vec_t t_; vec_t xm_; vec_t qy_; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/binomial_base.hpp0000644000176200001440000000100214171551160026256 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointBinomialBase : ElnetPointNonLinearCRTPBase { private: using base_t = ElnetPointNonLinearCRTPBase; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using typename base_t::update_t; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/poisson_naive.hpp0000644000176200001440000000167214171551160026363 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::poisson, util::mode_type::naive, ElnetPointInternalPolicy> : ElnetPointPoissonBase< ElnetPoint< util::glm_type::poisson, util::mode_type::naive, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointPoissonBase< ElnetPoint< util::glm_type::poisson, util::mode_type::naive, ElnetPointInternalPolicy> >; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using typename base_t::update_t; public: using base_t::base_t; }; } // namespace glmnetpp 
glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/poisson_base.hpp0000644000176200001440000000100114171551160026155 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointPoissonBase : ElnetPointNonLinearCRTPBase { private: using base_t = ElnetPointNonLinearCRTPBase; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using typename base_t::update_t; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/base.hpp0000644000176200001440000001360414220461661024420 0ustar liggesusers#pragma once #include #include #include #include #include namespace glmnetpp { /* * Common CRTP base class for all point-solvers. * Only routines that __must__ be generated differently should be in here, * i.e. those that are dependent on the derived type. */ template struct ElnetPointCRTPBase: details::traits::internal_t { private: using derived_t = ElnetPointDerived; using internal_t = typename details::traits::internal_t; protected: using update_t = util::update_type; using typename internal_t::value_t; using typename internal_t::index_t; using state_t = util::control_flow; // Generate CRTP self() GLMNETPP_GENERATE_CRTP(derived_t) template GLMNETPP_STRONG_INLINE std::pair fit(const PointConfigPack& pack) { this->increment_passes(); this->coord_desc_reset(); util::if_else( [this, &pack]() { this->for_each_with_skip( this->all_begin(), this->all_end(), [=, &pack](auto k) { this->self().template update(k, pack); }, [=](auto k) { return this->is_excluded(k); } ); }, [this, &pack]() { this->for_each_with_skip( this->active_begin(), this->active_end(), [=, &pack](auto k) { this->self().template update(k, pack); }, [](auto) { return false; } // no skip ); }); this->update_intercept(); if (this->has_converged()) { return util::if_else( [this, &pack]() -> std::pair { return {true, this->check_kkt(pack)}; }, []() -> std::pair { return {true, 
true}; } ); } if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } return {false, false}; } template GLMNETPP_STRONG_INLINE state_t update(index_t k, const PointPackType& pack, DiffType&& diff) { diff = this->beta(k); // save old beta_k this->update_beta(k, pack); // update new beta_k (assumes diff doesn't change) if (this->equal(diff, this->beta(k))) return state_t::continue_; // update active set stuff if full util::if_else( [=]() { if (!this->is_active(k)) { this->update_active(k); } }, []() {}); diff = this->beta(k) - diff; // new minus old beta_k this->update_dlx(k, diff); return state_t::noop_; } public: using internal_t::internal_t; }; /* * This is a CRTP base class for all non-linear GLM point-solvers. * All of these use IRLS-WLS implementation, so it is convenient capture the logic in one place. */ template struct ElnetPointNonLinearCRTPBase: ElnetPointCRTPBase { private: using base_t = ElnetPointCRTPBase; protected: using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using typename base_t::update_t; using base_t::self; public: using base_t::base_t; template void fit(const PointConfigPack& pack) { self().irls(pack); } /* * Default version of coordinate-descent update logic that propagates update of coefficient difference. */ template GLMNETPP_STRONG_INLINE state_t update(index_t k, const PointConfigPack& pack, DiffType&& diff) { state_t state = base_t::template update(k, pack, diff); if (state == state_t::continue_) return state_t::continue_; this->update_resid(k, diff); return state_t::noop_; } /* * Default version of coordinate-descent update logic that should be called from the most derived object. */ template GLMNETPP_STRONG_INLINE void update(index_t k, const PointConfigPack& pack) { value_t diff = 0.0; self().template update(k, pack, diff); } /* * Default version of WLS loop logic. 
*/ template GLMNETPP_STRONG_INLINE void wls(const PointConfigPack& pack) { while (1) { bool converged = false, _ = false; std::tie(converged, _) = base_t::template fit(pack); if (converged) break; // partial fit while (1) { bool converged = false, _ = false; std::tie(converged, _) = base_t::template fit(pack); if (converged) break; } } } /* * Default version of IRLS loop logic. */ template GLMNETPP_STRONG_INLINE void irls(const PointConfigPack& pack) { this->initialize(pack); while (1) { if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } this->setup_wls(pack); base_t::self().wls(pack); state_t state = this->update_irls(pack); if (state == state_t::break_) break; } } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_binomial_two_class.hpp0000644000176200001440000000136014171551160030053 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::binomial, util::mode_type::two_class, SpElnetPointInternalPolicy> : ElnetPoint< util::glm_type::binomial, util::mode_type::two_class, SpElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::binomial, util::mode_type::two_class, SpElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/binomial_multi_class_group.hpp0000644000176200001440000000220414171551160031104 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointInternalPolicy> : ElnetPointBinomialBase< ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointBinomialBase< ElnetPoint::multi_class_group, ElnetPointInternalPolicy> >; using typename base_t::index_t; using typename base_t::update_t; public: using base_t::base_t; template GLMNETPP_STRONG_INLINE 
void update(index_t k, const PointConfigPack& pack) { auto&& diff = this->beta_buffer(); base_t::template update(k, pack, diff); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_binomial_multi_class.hpp0000644000176200001440000000136614157246224030410 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointInternalPolicy> : ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_wls.hpp0000644000176200001440000000125114171551160026701 0ustar liggesusers#pragma once #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::gaussian, util::mode_type::wls, ElnetPointInternalPolicy> : ElnetPoint< util::glm_type::gaussian, util::mode_type::wls, ElnetPointInternalPolicy> { private: using base_t = ElnetPoint::wls, ElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_naive.hpp0000644000176200001440000000124214140040573027173 0ustar liggesusers#pragma once #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointInternalPolicy> : ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_binomial_multi_class_group.hpp0000644000176200001440000000140214171551160031605 0ustar liggesusers#pragma once #include #include 
namespace glmnetpp { template struct SpElnetPoint< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointInternalPolicy> : ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/decl.hpp0000644000176200001440000000141514171551160024411 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPointCRTPBase; template struct ElnetPointNonLinearCRTPBase; template struct ElnetPointBinomialBase; template struct ElnetPointGaussianBase; template struct ElnetPointPoissonBase; template mode , class ElnetPointInternalPolicy=ElnetPointInternal > struct ElnetPoint; template mode , class SpElnetPointInternalPolicy=SpElnetPointInternal > struct SpElnetPoint; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_cov.hpp0000644000176200001440000000470014171551160026163 0ustar liggesusers#pragma once #include #include #include #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, ElnetPointInternalPolicy> : ElnetPointGaussianBase< ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointGaussianBase< ElnetPoint::cov, ElnetPointInternalPolicy> >; using typename base_t::update_t; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; public: using base_t::base_t; template GLMNETPP_STRONG_INLINE void partial_fit(const PointPackType& pack) { this->compress_active(); base_t::partial_fit(pack); // update gradient to leave invariant this->update_compressed_active(); this->update_grad_compressed_active(); } template GLMNETPP_STRONG_INLINE void update(index_t k, 
const PointPackType& pack) { value_t beta_diff = 0; auto state = base_t::template update(k, pack, beta_diff); if (state == state_t::continue_) return; // update gradient util::if_else( [=]() { std::for_each( this->all_begin(), this->all_end(), [=](auto j) { if (!this->is_excluded(j)) { this->update_grad(j, k, beta_diff); } }); }, [=]() { std::for_each( this->active_begin(), this->active_end(), [=](auto j) { this->update_grad(j, k, beta_diff); }); }); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_poisson_naive.hpp0000644000176200001440000000141214157246224027063 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::poisson, util::mode_type::naive, SpElnetPointInternalPolicy> : ElnetPoint< util::glm_type::poisson, util::mode_type::naive, SpElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::poisson, util::mode_type::naive, SpElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/traits.hpp0000644000176200001440000000155614140040573025013 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { namespace details { template struct traits; template m , class ElnetPointInternalType> struct traits > { static constexpr util::glm_type glm = g; static constexpr util::mode_type mode = m; using internal_t = ElnetPointInternalType; }; template m , class ElnetPointInternalType> struct traits > { static constexpr util::glm_type glm = g; static constexpr util::mode_type mode = m; using internal_t = ElnetPointInternalType; }; } // namespace details } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path.hpp0000644000176200001440000000203214171551160023301 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/0000755000176200001440000000000014471023421022570 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_cov.hpp0000644000176200001440000001314314157246224026477 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Sparse Gaussian covariance method elastic net path-solver. */ template struct SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointPolicy> : ElnetPathGaussianBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointPolicy> > { private: using base_t = ElnetPathGaussianBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointPolicy> >; using elnet_point_t = SpElnetPointPolicy; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; GType& g; const WType& w; const XMType& xm; const XSType& xs; }; public: using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, GType& g, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , GType , WType , IntType , XType , ULamType , XMType , XSType , XVType , AOType , IAType , 
KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack{ // build sub-pack { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, almo, nlp, jerr, setpb_f, int_param}, // add new members xv, rsqo }, // add new members g, w, xm, xs }; crtp_base_t::fit(pack); } template auto get_elnet_point(const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, pack.g, pack.w, ssp.x, pack.xm, pack.xs, sp.xv, ssp.vp, ssp.cl, ssp.ju); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType&) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, pack.g); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_multi.hpp0000644000176200001440000001347414171551160027043 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Sparse Gaussian multi-response method elastic net path-solver. 
*/ template struct SpElnetPath< util::glm_type::gaussian, util::mode_type::multi, ElnetPointPolicy> : ElnetPathGaussianMultiBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::multi, ElnetPointPolicy> > { private: using base_t = ElnetPathGaussianMultiBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath::multi, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType, JUType, VPType, CLType, YType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const WType& w; const XMType& xm; const XSType& xs; }; public: using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, ValueType ys0, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , YType , WType , IntType , XType , ULamType , XMType , XSType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack { { // build sub-pack { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, almo, nlp, jerr, setpb_f, int_param}, // add new members xv, rsqo }, // add new members y, ys0 }, // add new members w, xm, xs }; crtp_base_t::fit(pack); } template auto get_elnet_point(const FitPackType& pack, const PathConfigPackType&) const { 
auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; auto& sssp = ssp.sub_pack; return elnet_point_t( sssp.thr, sssp.maxit, sssp.nx, sssp.nlp, sssp.ia, sp.ys0, sp.y, sssp.x, pack.w, pack.xm, pack.xs, ssp.xv, sssp.vp, sssp.cl, sssp.ju, sssp.int_param); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/binomial_multi_class.hpp0000644000176200001440000000760114157246224027507 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPath< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointPolicy> : ElnetPathBinomialMultiClassBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointPolicy> > { private: using base_t = ElnetPathBinomialMultiClassBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; public: using base_t::initialize_path; using base_t::process_point_fit; using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const WType& w, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool isd, bool intr, IntType maxit, IntType kopt, IntType& lmu, A0Type& a0, 
AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param}, // add new members y, g, w, isd, intr, kopt, a0, dev0, dev }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, PathConfigPackType&& path_pack) const { auto& sp = pack.sub_pack; return elnet_point_t( pack.isd, pack.intr, pack.kopt, sp.thr, sp.maxit, sp.nx, sp.nlp, sp.ia, pack.g, pack.dev0, sp.x, pack.y, pack.w, sp.vp, sp.cl, sp.ju, path_pack.is, sp.int_param); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack, path_pack, elnet_point.abs_grad()); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/binomial_two_class.hpp0000644000176200001440000000667114157246224027174 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct ElnetPath< util::glm_type::binomial, util::mode_type::two_class, ElnetPointPolicy> : ElnetPathBinomialTwoClassBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::binomial, util::mode_type::two_class, ElnetPointPolicy> > { private: using base_t = ElnetPathBinomialTwoClassBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath::two_class, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; public: using base_t::initialize_path; using base_t::initialize_point; using base_t::process_point_fit; using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, 
const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const WType& w, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool isd, bool intr, IntType maxit, IntType kopt, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param}, // add new members y, g, w, isd, intr, kopt, a0, dev0, dev }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; return elnet_point_t( pack.isd, pack.intr, pack.kopt, sp.thr, sp.maxit, sp.nx, sp.nlp, sp.ia, pack.g, pack.dev0, sp.x, pack.y, pack.w, sp.vp, sp.cl, sp.ju, sp.int_param); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_base.hpp0000644000176200001440000002314014171551160026110 0ustar liggesusers#pragma once #include #include #include #include #include #include namespace glmnetpp { /* * Common routines across all Gaussian path-solvers. */ struct ElnetPathGaussianBase : ElnetPathBase { private: using base_t = ElnetPathBase; protected: using typename base_t::state_t; using base_t::process_point_fit; /* * Common FitPack base class for all Gaussian path-solvers. 
*/ template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , ULamType , AOType , IAType , KinType , ALMOType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; GLMNETPP_STRONG_INLINE int_t& err_code() const { return sub_pack.err_code(); } GLMNETPP_STRONG_INLINE int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const XVType& xv; RSQOType& rsqo; }; /* * Delegate to base class method with the base pack. */ template GLMNETPP_STRONG_INLINE auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template GLMNETPP_STRONG_INLINE auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const GType& g) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, g); } /* * Common routine for all Gaussian path-solvers after point-solver fit. * See fit() in base.hpp for usage. * * @param pack object of FitPack of current class. * @param path_pack object of PathConfigPack of current class. * @param point_pack object of PointConfigPack of current class. * @param elnet_point point-solver object. 
*/ template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { using int_t = typename std::decay_t::int_t; using value_t = typename std::decay_t::value_t; auto& sp = pack.sub_pack; auto& ao = sp.ao; auto& rsqo = pack.rsqo; auto m = point_pack.m; auto n_active = elnet_point.n_active(); auto rsq = elnet_point.rsq(); auto rsq0 = elnet_point.rsq_prev(); base_t::store_beta_compressed( elnet_point.active_begin(), elnet_point.active_end(), ao.col(m), [&](int_t k) { return elnet_point.beta(k); } ); rsqo(m) = rsq; int_t me = (ao.col(m).head(n_active).array() != 0).count(); auto prop_dev_change = (rsq == 0) ? std::numeric_limits::infinity() : (rsq - rsq0) / rsq; state_t state = base_t::process_point_fit( m, n_active, me, prop_dev_change, rsq, sp, path_pack, point_pack ); if (state == state_t::continue_ || state == state_t::break_) return state; return state_t::noop_; } /* * Common finishing routine for all Gaussian path-solvers after (path) fit. * See fit() in base.hpp for usage. */ template GLMNETPP_STRONG_INLINE constexpr void process_path_fit(const FitPackType&, const ElnetPointType&) const {} }; /* * Common routines across all Gaussian multi-response path-solvers. 
*/ struct ElnetPathGaussianMultiBase : ElnetPathGaussianBase { private: using base_t = ElnetPathGaussianBase; template struct PointConfigPack { using sub_pack_t = typename base_t::template PointConfigPack; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; using mat_t = Eigen::Matrix; GLMNETPP_STRONG_INLINE auto elastic_prop() const { return sub_pack.elastic_prop(); } GLMNETPP_STRONG_INLINE auto lmda() const { return sub_pack.lmda(); } GLMNETPP_STRONG_INLINE auto prev_lmda() const { return sub_pack.prev_lmda(); } GLMNETPP_STRONG_INLINE auto l1_regul() const { return sub_pack.l1_regul(); } GLMNETPP_STRONG_INLINE auto l2_regul() const { return sub_pack.l2_regul(); } sub_pack_t sub_pack; Eigen::Map a_slice; }; protected: template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; YType& y; ValueType ys0; }; template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template GLMNETPP_STRONG_INLINE auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { using pack_t = typename std::decay::type; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; using mat_t = Eigen::Matrix; auto nx = pack.sub_pack.sub_pack.nx; auto nr = pack.y.cols(); auto& a = pack.sub_pack.sub_pack.ao; // set the new slice for coefficient storage Eigen::Map a_slice( a.data() + nx * nr * m, nx, nr); auto&& sp = base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point.abs_grad()); using 
point_config_pack_t = PointConfigPack; return point_config_pack_t{{std::move(sp)}, a_slice}; } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point) const { using fit_pack_t = std::decay_t; using value_t = typename fit_pack_t::value_t; using int_t = typename fit_pack_t::int_t; auto& point_sp = point_pack.sub_pack; auto nr = pack.y.cols(); auto rsq = elnet_point.rsq(); auto rsq0 = elnet_point.rsq_prev(); auto n_active = elnet_point.n_active(); auto ys0 = pack.ys0; auto& ao_slice = point_pack.a_slice; int_t m = point_pack.sub_pack.m; auto& rsqo = pack.sub_pack.rsqo; for (int j = 0; j < nr; ++j) { auto ao_slice_j = ao_slice.col(j); base_t::store_beta_compressed( elnet_point.active_begin(), elnet_point.active_end(), ao_slice_j, [&](auto k) { return elnet_point.beta(j, k); } ); } rsqo(m) = 1.0-rsq/ys0; int_t me = (ao_slice.col(0).head(n_active).array() != 0).count(); auto prop_dev_change = (rsq == 0) ? std::numeric_limits::infinity() : (rsq0 - rsq) / rsq; state_t state = base_t::process_point_fit( m, n_active, me, prop_dev_change, rsqo(m), pack.sub_pack.sub_pack, path_pack, point_sp ); if (state == state_t::continue_ || state == state_t::break_) return state; return state_t::noop_; } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_naive.hpp0000644000176200001440000001231514157246224026310 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Gaussian naive method elastic net path-solver. 
*/ template struct ElnetPath< util::glm_type::gaussian, util::mode_type::naive, ElnetPointPolicy> : ElnetPathGaussianBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::gaussian, util::mode_type::naive, ElnetPointPolicy> > { private: using base_t = ElnetPathGaussianBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath::naive, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; YType& y; }; public: using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , YType , IntType , XType , ULamType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack { // build sub-pack { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, almo, nlp, jerr, setpb_f, int_param}, // add new members xv, rsqo }, // add new members y }; crtp_base_t::fit(pack); } template auto get_elnet_point(const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, pack.y, ssp.x, sp.xv, ssp.vp, ssp.cl, ssp.ju); } template auto 
initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point.abs_grad()); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_multi.hpp0000644000176200001440000000644514171551160026341 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Gaussian multi-response method elastic net path-solver. */ template struct ElnetPath< util::glm_type::gaussian, util::mode_type::multi, ElnetPointPolicy> : ElnetPathGaussianMultiBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::gaussian, util::mode_type::multi, ElnetPointPolicy> > { private: using base_t = ElnetPathGaussianMultiBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath::multi, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; public: using base_t::initialize_path; using base_t::initialize_point; using base_t::process_path_fit; using base_t::process_point_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, ValueType ys0, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { base_t::FitPack< ValueType , JUType , VPType , CLType , YType , IntType , XType , ULamType , XVType , AOType , IAType , 
KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack { // build sub-pack { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, almo, nlp, jerr, setpb_f, int_param}, // add new members xv, rsqo }, // add new members y, ys0 }; crtp_base_t::fit(pack); } template auto get_elnet_point(const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, pack.ys0, pack.y, ssp.x, sp.xv, ssp.vp, ssp.cl, ssp.ju, ssp.int_param); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/binomial_base.hpp0000644000176200001440000004073214171551160026076 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Common base class across all binomial path-solvers. */ struct ElnetPathBinomialBase : ElnetPathBase { private: using base_t = ElnetPathBase; protected: using state_t = util::control_flow; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , ULamType , AOType , IAType , KinType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const YType& y; GType& g; const WType& w; bool isd; bool intr; int_t kopt; A0Type& a0; value_t& dev0; DevType& dev; }; template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const GType& g) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, g); } }; /* * Common base class for all Binomial 
Two-Class path-solvers. */ struct ElnetPathBinomialTwoClassBase : ElnetPathBinomialBase { private: using base_t = ElnetPathBinomialBase; using base_t::initialize_point; template GLMNETPP_STRONG_INLINE static auto dev2( const WType& w, const YType& y, const PType& p, ValueType pmin ) { auto pmax = 1.0-pmin; auto s = 0.0; for (int i = 0; i < w.size(); ++i) { auto pi = std::min(std::max(pmin, p(i)), pmax); s -= w(i) * (y(i) * std::log(pi) + (1.0-y(i)) * std::log(1.0-pi)); } return s; } protected: template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack, path_pack, elnet_point.abs_grad()); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { using int_t = typename std::decay_t::int_t; auto& sp = pack.sub_pack; auto& a = sp.ao; const auto& int_param = sp.int_param; const auto& w = pack.w; const auto& y = pack.y; auto& a0 = pack.a0; auto& dev = pack.dev; int_t m = point_pack.m; int_t nin = elnet_point.n_active(); const auto& q = elnet_point.q(); auto dev1 = elnet_point.deviance(); auto dev0 = elnet_point.null_deviance(); base_t::store_beta_compressed( elnet_point.active_begin(), elnet_point.active_end(), a.col(m), [&](int_t k) { return elnet_point.beta(k); } ); a0(m) = elnet_point.intercept(); auto devi = dev2(w,y,q,int_param.pmin); dev(m) = (dev1-devi)/dev0; int_t me = (a.col(m).head(nin).array() != 0.0).array().count(); auto prev_dev = (m == 0) ? 
0 : dev(m-1); state_t state = base_t::process_point_fit( m, nin, me, dev(m)-prev_dev, dev(m), sp, path_pack, point_pack); if (state == state_t::continue_ || state == state_t::break_) return state; if (elnet_point.is_total_var_too_small()) return state_t::break_; return state_t::noop_; } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { auto& g = pack.g; const auto& q = elnet_point.q(); g.array() = (q.array()/(1.0-q.array())).log(); } }; /* * Common base class for all Binomial Multi-Class path-solvers. * TODO: refactor more after integrating sparse multi-class. */ struct ElnetPathBinomialMultiClassBase : ElnetPathBinomialBase { private: template static auto nintot( const AType& a, const MType& m, IntType nin, ISType&& is ) { auto nc = a.cols(); is.setZero(); int out = 0; for (int ic = 0; ic < nc; ++ic) { for (int j = 0; j < nin; ++j) { auto k = m(j)-1; if (is(k)) continue; if (!a(j,ic)) continue; is(k) = k+1; ++out; } } return out; } protected: using base_t = ElnetPathBinomialBase; template struct PathConfigPack { using sub_pack_t = typename base_t::template PathConfigPack; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; using ivec_t = Eigen::Matrix; sub_pack_t sub_pack; ivec_t is; // common buffer for both point and path fitting }; template struct PointConfigPack { using sub_pack_t = typename base_t::template PointConfigPack; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; using mat_t = Eigen::Matrix; auto elastic_prop() const { return sub_pack.elastic_prop(); } auto lmda() const { return sub_pack.lmda(); } auto prev_lmda() const { return sub_pack.prev_lmda(); } auto l1_regul() const { return sub_pack.l1_regul(); } auto l2_regul() const { return sub_pack.l2_regul(); } sub_pack_t sub_pack; Eigen::Map a_slice; }; template auto initialize_path(const FitPackType& pack) const { using value_t = typename std::decay_t::value_t; using int_t = 
typename std::decay_t::int_t; using ivec_t = Eigen::Matrix; auto&& sp = base_t::initialize_path(pack); const auto& x = pack.sub_pack.x; const auto& y = pack.y; ivec_t is(std::max(x.cols(), y.cols())); using pack_config_t = PathConfigPack; return pack_config_t{{std::move(sp)}, std::move(is)}; } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const GType& g) const { using pack_t = typename std::decay::type; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; using mat_t = Eigen::Matrix; auto nx = pack.sub_pack.nx; auto& a = pack.sub_pack.ao; auto nc = pack.y.cols(); // set the new slice for coefficient storage Eigen::Map a_slice( a.data() + nx * nc * m, nx, nc); auto&& sp = base_t::initialize_point(m, lmda_curr, pack, path_pack.sub_pack, g); using point_config_pack_t = PointConfigPack; return point_config_pack_t{{std::move(sp)}, a_slice}; } /* * Helper function that generalizes point fit processing * depending on how we compute the total number of nonzero entries in the active set. 
*/ template state_t process_point_fit( const FitPackType& pack, PathConfigPackType&& path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point, ActiveNznFType active_nzn_f) const { using int_t = typename std::decay_t::int_t; auto& sp = pack.sub_pack; auto no = sp.x.rows(); const auto& y = pack.y; const auto& w = pack.w; auto& a0 = pack.a0; auto& dev = pack.dev; int_t m = point_pack.sub_pack.m; auto& a_slice = point_pack.a_slice; auto nc = elnet_point.n_classes(); int_t nin = elnet_point.n_active(); const auto& q = elnet_point.q(); const auto& sxp = elnet_point.sxp(); auto dev1 = elnet_point.deviance(); auto dev0 = elnet_point.null_deviance(); auto devi = 0.0; for (int_t ic = 0; ic < nc; ++ic) { base_t::store_beta_compressed( elnet_point.active_begin(), elnet_point.active_end(), a_slice.col(ic), [&](int_t k) { return elnet_point.beta(k, ic); } ); a0(ic, m) = elnet_point.intercept(ic); for (int_t i = 0; i < no; ++i) { if (y(i,ic) <= 0) continue; devi -= w(i) * y(i,ic) * std::log(q(i,ic) / sxp(i)); } } int_t me = active_nzn_f(); dev(m) = (dev1-devi)/dev0; auto prev_dev = (m <= 0) ? 
0 : dev(m-1); state_t state = base_t::process_point_fit( m, nin, me, dev(m) - prev_dev, dev(m), sp, path_pack.sub_pack, point_pack.sub_pack ); if (state == state_t::continue_ || state == state_t::break_) return state; if (elnet_point.has_skipped_all_classes()) return state_t::break_; return state_t::noop_; } template state_t process_point_fit( const FitPackType& pack, PathConfigPackType&& path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point) const { auto& a_slice = point_pack.a_slice; const auto& ia = pack.sub_pack.ia; auto nin = elnet_point.n_active(); auto& is = path_pack.is; return process_point_fit(pack, path_pack, point_pack, elnet_point, [&]() { return nintot(a_slice, ia, nin, is); }); } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { auto& g = pack.g; auto& q = elnet_point.q(); auto nc = pack.y.cols(); g.array() = q.array().log(); for (int i = 0; i < g.rows(); ++i) { g.row(i).array() -= g.row(i).sum()/nc; } } }; /* * Common base class for all Binomial Multi-Class with group penalty path-solvers. */ struct ElnetPathBinomialMultiClassGroupBase : ElnetPathBinomialMultiClassBase { private: using base_t = ElnetPathBinomialMultiClassBase; protected: /* * Looks stupid, but this makes the logic consistent, * and therefore, simplifies the other member functions. 
*/ template struct PathConfigPack { using sub_pack_t = typename base_t::base_t::template PathConfigPack; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; using ivec_t = Eigen::Matrix; sub_pack_t sub_pack; }; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const XVType& xv; }; template auto initialize_path(const FitPackType& pack) const { // Note: we want to by-pass the base class's override of PathPack // and go directly to its base class's version. auto&& sp = base_t::base_t::initialize_path(pack.sub_pack); using sp_t = std::decay_t; using value_t = typename sp_t::value_t; using int_t = typename sp_t::int_t; using path_config_t = PathConfigPack; return path_config_t{sp}; } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const GType& g) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, g); } template state_t process_point_fit( const FitPackType& pack, PathConfigPackType&& path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point) const { auto& a_slice = point_pack.a_slice; auto nin = elnet_point.n_active(); return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point, [&]() { return (a_slice.col(0).head(nin).array() != 0.0).count(); }); } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { base_t::process_path_fit(pack.sub_pack, elnet_point); } }; } // namespace glmnetpp 
glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/poisson_naive.hpp0000644000176200001440000000651314157246224026173 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct ElnetPath< util::glm_type::poisson, util::mode_type::naive, ElnetPointPolicy> : ElnetPathPoissonBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::poisson, util::mode_type::naive, ElnetPointPolicy> > { private: using base_t = ElnetPathPoissonBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath::naive, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; public: using base_t::initialize_path; using base_t::initialize_point; using base_t::process_path_fit; using base_t::process_point_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const QType& q, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool intr, IntType maxit, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , QType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param}, // add new members y, g, q, intr, a0, dev0, dev }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; return elnet_point_t( pack.intr, sp.thr, sp.maxit, sp.nx, sp.nlp, sp.ia, pack.dev0, sp.x, pack.y, pack.g, pack.q, sp.vp, sp.cl, sp.ju, sp.int_param); } }; } // namespace glmnetpp 
glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/poisson_base.hpp0000644000176200001440000001006414171551160025771 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Common base class across all binomial path-solvers. */ struct ElnetPathPoissonBase : ElnetPathBase { private: using base_t = ElnetPathBase; protected: using state_t = util::control_flow; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , ULamType , AOType , IAType , KinType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const YType& y; GType& g; const QType& q; bool intr; A0Type& a0; value_t& dev0; DevType& dev; }; template auto initialize_path(const FitPackType& pack) const { auto out = base_t::initialize_path(pack.sub_pack); out.sml *= 10; return out; } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point.abs_grad()); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { using int_t = typename std::decay_t::int_t; auto& sp = pack.sub_pack; auto& a = sp.ao; auto& a0 = pack.a0; auto& dev = pack.dev; auto mnl = path_pack.mnl; int_t m = point_pack.m; int_t nin = elnet_point.n_active(); base_t::store_beta_compressed( elnet_point.active_begin(), elnet_point.active_end(), a.col(m), [&](int_t k) { return elnet_point.beta(k); } ); a0(m) = elnet_point.intercept(); dev(m) = elnet_point.deviance(); int_t me = 
(a.col(m).head(nin).array() != 0.0).count(); auto prev_dev = (m+1 >= mnl) ? dev(m-mnl+1) : 0; // otherwise, set to dummy value state_t state = base_t::process_point_fit( m, nin, me, (dev(m)-prev_dev)/dev(m), dev(m), sp, path_pack, point_pack); if (state == state_t::continue_ || state == state_t::break_) return state; return state_t::noop_; } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { pack.g = elnet_point.prediction(); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/base.hpp0000644000176200001440000002360214171551160024221 0ustar liggesusers#pragma once #include #include #include #include #include #include namespace glmnetpp { /* * Common CRTP base class for all path-solvers. * All routines here should be those that depend on the derived class type (ElnetPathDerived), * i.e. those that have different implementation definitions based on the derived type. */ template struct ElnetPathCRTPBase { private: using derived_t = ElnetPathDerived; using state_t = util::control_flow; // generates self() GLMNETPP_GENERATE_CRTP(derived_t) public: /* * Main driver for fitting a path-wise solution to elastic net. * The purpose of this function is to abstract the control-flow of the fit function. * * @param pack the derived class FitPack. 
*/ template void fit(const FitPackType& pack) const { using pack_t = std::decay_t; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; auto& jerr = pack.err_code(); try { auto&& path_config_pack = self().initialize_path(pack); auto&& elnet_point = self().get_elnet_point(pack, path_config_pack); value_t lmda_curr = 0; // this makes the math work out in the point solver for (int_t m = 0; m < pack.path_size(); ++m) { auto&& point_config_pack = self().initialize_point(m, lmda_curr, pack, path_config_pack, elnet_point); try { elnet_point.fit(point_config_pack); } catch (const util::maxit_reached_error& e) { jerr = e.err_code(m); return; } catch (const util::bnorm_maxit_reached_error& e) { jerr = e.err_code(m); return; } catch (const util::elnet_error& e) { jerr = e.err_code(m); break; } state_t state = self().process_point_fit(pack, path_config_pack, point_config_pack, elnet_point); if (state == state_t::continue_) continue; if (state == state_t::break_) break; } self().process_path_fit(pack, elnet_point); } catch (const util::elnet_error& e) { jerr = e.err_code(0); } } }; /* * Common base class for all path-solvers. * Note that these routines should NOT be inside the CRTP class, * as this will lead to massive code bloat. */ struct ElnetPathBase { protected: using state_t = util::control_flow; /* * Common PathConfigPack base class across every type of path-solver. */ template struct PathConfigPack { using value_t = ValueType; using int_t = IntType; value_t omb; value_t alm; value_t alf; int_t ni; int_t mnl; value_t sml; value_t rsqmax; }; /* * Common PointConfigPack base class across every type of path-solver. 
*/ template struct PointConfigPack { using value_t = ValueType; using int_t = IntType; value_t elastic_prop() const { return beta; } value_t lmda() const { return alm; } value_t prev_lmda() const { return alm0; } value_t l1_regul() const { return ab; } value_t l2_regul() const { return dem; } int_t m; value_t ab; value_t dem; value_t alm0; value_t alm; value_t beta; }; /* * Common FitPack base class across every type of path-solver. */ template struct FitPack { using value_t = ValueType; using int_t = IntType; int_t& err_code() const { return jerr; } int_t path_size() const { return nlam; } value_t beta; const JUType& ju; const VPType& vp; const CLType& cl; int_t ne; int_t nx; const XType& x; int_t nlam; value_t flmin; const ULamType& ulam; value_t thr; int_t maxit; int_t& lmu; AOType& ao; IAType& ia; KinType& kin; ALMOType& almo; int_t& nlp; int_t& jerr; SetpbFType setpb_f; IntParamType int_param; }; /* * Initializes common global configuration for a path-solver * across every type of path-solver. * All outputted quantities should be those that both * a path-solver and a point-solver would not modify. */ template auto initialize_path(const FitPackType& pack) const { using pack_t = typename std::decay::type; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; const auto& x = pack.x; auto beta = pack.beta; auto eps = pack.int_param.eps; auto mnlam = pack.int_param.mnlam; auto sml = pack.int_param.sml; auto rsqmax = pack.int_param.rsqmax; int_t ni = x.cols(); value_t omb = 1.0 - beta; value_t alm = 0.0; value_t alf = 1.0; if (pack.flmin < 1.0) { auto eqs = std::max(eps, pack.flmin); alf = std::pow(eqs, 1.0 / (pack.nlam - 1.)); } pack.nlp = 0; auto mnl = std::min(mnlam, pack.nlam); using pack_config_t = PathConfigPack; return pack_config_t{omb, alm, alf, ni, mnl, sml, rsqmax}; } /* * Initializes common global configuration for a point-solver * across every type of point-solver. 
* All outputted quantities should be those that a point-solver would not modify. * * @param m iteration number. * @param lmda_curr current lambda value (will be updated to next lmda after the call). * @param pack object of type FitPack of current class. * @param path_pack object of type PathConfigPack of current class. * @param g (absolute or non-absolute) gradient vector. */ template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const GType& g) const { using pack_t = typename std::decay::type; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; auto flmin = pack.flmin; auto beta = pack.beta; auto ni = path_pack.ni; const auto& ulam = pack.ulam; const auto& ju = pack.ju; const auto& vp = pack.vp; auto alf = path_pack.alf; auto omb = path_pack.omb; auto& setpb_f = pack.setpb_f; auto big = pack.int_param.big; auto itrace = pack.int_param.itrace; if (itrace) setpb_f(m); auto alm0 = lmda_curr; auto alm = alm0; if (flmin >= 1.0) { alm = ulam(m); } else if (m > 1) { alm *= alf; } else if (m == 0) { alm = big; } else { alm0 = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0 || (vp(j) <= 0.0)) continue; alm0 = std::max(alm0, std::abs(g(j)) / vp(j)); } alm0 /= std::max(beta, 1e-3); alm = alm0 * alf; } lmda_curr = alm; auto dem = alm * omb; auto ab = alm * beta; using point_config_pack_t = PointConfigPack; return point_config_pack_t{m, ab, dem, alm0, alm, beta}; } template state_t process_point_fit( IndexType m, IndexType n_active, IndexType me, ValueType prop_dev_change, ValueType curr_dev, const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack) const { auto& kin = pack.kin; auto& almo = pack.almo; auto& lmu = pack.lmu; auto flmin = pack.flmin; auto ne = pack.ne; auto mnl = path_pack.mnl; auto rsqmax = path_pack.rsqmax; auto sml = path_pack.sml; auto alm = point_pack.alm; kin(m) = n_active; almo(m) = alm; lmu = m + 1; if (lmu 
< mnl || flmin >= 1.0) return state_t::continue_; if ((me > ne) || (prop_dev_change < sml) || (curr_dev > rsqmax)) return state_t::break_; return state_t::noop_; } /* Helper static routines for all derived classes */ /* * Stores compressed_beta(j) = beta(k_j) where k_j = *begin at the jth iteration. */ template GLMNETPP_STRONG_INLINE static void store_beta_compressed( Iter begin, Iter end, CompBetaType&& compressed_beta, const BetaType& beta) { size_t j = 0; std::for_each(begin, end, [&](auto k) { compressed_beta(j++) = beta(k); }); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_binomial_two_class.hpp0000644000176200001440000001453514157246224027674 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct SpElnetPath< util::glm_type::binomial, util::mode_type::two_class, ElnetPointPolicy> : ElnetPathBinomialTwoClassBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::binomial, util::mode_type::two_class, ElnetPointPolicy> > { private: using base_t = ElnetPathBinomialTwoClassBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath< util::glm_type::binomial, util::mode_type::two_class, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; using typename base_t::state_t; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const XBType& xb; const XSType& xs; }; public: template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const WType& w, IntType nlam, ValueType flmin, 
const ULamType& ulam, const XBType& xb, const XSType& xs, ValueType thr, bool isd, bool intr, IntType maxit, IntType kopt, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , XBType , XSType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ // build sub-pack { // build sub-pack { beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param }, // add new members y, g, w, isd, intr, kopt, a0, dev0, dev }, xb, xs }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( sp.isd, sp.intr, sp.kopt, ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, sp.g, sp.dev0, ssp.x, sp.y, sp.w, pack.xb, pack.xs, ssp.vp, ssp.cl, ssp.ju, ssp.int_param); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { base_t::process_path_fit(pack.sub_pack, elnet_point); } }; } // namespace glmnetpp 
glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/binomial_multi_class_group.hpp0000644000176200001440000001002214171551160030704 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPath< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointPolicy> : ElnetPathBinomialMultiClassGroupBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointPolicy> > { private: using base_t = ElnetPathBinomialMultiClassGroupBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; public: using base_t::initialize_path; using base_t::process_point_fit; using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const WType& w, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool intr, IntType maxit, const XVType& xv, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , XVType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param}, // add new members y, g, w, true /* not used */, intr, 2 /* not used */, a0, dev0, dev }, xv }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, const PathConfigPackType& path_pack) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( sp.intr, ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, sp.g, sp.dev0, 
ssp.x, sp.y, sp.w, pack.xv, ssp.vp, ssp.cl, ssp.ju, ssp.int_param); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack, path_pack, elnet_point.abs_grad()); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_binomial_multi_class.hpp0000644000176200001440000001457314157246224030217 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct SpElnetPath< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointPolicy> : ElnetPathBinomialMultiClassBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointPolicy> > { private: using base_t = ElnetPathBinomialMultiClassBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath< util::glm_type::binomial, util::mode_type::multi_class, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; using typename base_t::state_t; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const XBType& xb; const XSType& xs; }; public: template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const WType& w, IntType nlam, ValueType flmin, const ULamType& ulam, const XBType& xb, const XSType& xs, ValueType thr, bool isd, bool intr, IntType maxit, IntType kopt, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& 
dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , XBType , XSType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ // build sub-pack { // build sub-pack { beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param }, // add new members y, g, w, isd, intr, kopt, a0, dev0, dev }, xb, xs }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, PathConfigPackType&& path_pack) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( sp.isd, sp.intr, sp.kopt, ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, sp.g, sp.dev0, ssp.x, sp.y, sp.w, pack.xb, pack.xs, ssp.vp, ssp.cl, ssp.ju, path_pack.is, ssp.int_param); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point.abs_grad()); } template state_t process_point_fit( const FitPackType& pack, PathConfigPackType&& path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { base_t::process_path_fit(pack.sub_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_naive.hpp0000644000176200001440000001320014157246224027004 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Sparse Gaussian naive method elastic 
net path-solver. */ template struct SpElnetPath< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointPolicy> : ElnetPathGaussianBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointPolicy> > { private: using base_t = ElnetPathGaussianBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointPolicy> >; using elnet_point_t = SpElnetPointPolicy; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; YType& y; const WType& w; const XMType& xm; const XSType& xs; }; public: using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , YType , WType , IntType , XType , ULamType , XMType , XSType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack{ // build sub-pack { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, almo, nlp, jerr, setpb_f, int_param}, // add new members xv, rsqo }, // add new members y, w, xm, xs }; crtp_base_t::fit(pack); } template auto get_elnet_point(const FitPackType& pack, const 
PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, pack.y, pack.w, ssp.x, pack.xm, pack.xs, sp.xv, ssp.vp, ssp.cl, ssp.ju); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point.abs_grad()); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_binomial_multi_class_group.hpp0000644000176200001440000001472714171551160031426 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct SpElnetPath< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointPolicy> : ElnetPathBinomialMultiClassGroupBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointPolicy> > { private: using base_t = ElnetPathBinomialMultiClassGroupBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath< util::glm_type::binomial, util::mode_type::multi_class_group, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; public: template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , XVType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { 
return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; const XMType& xm; const XSType& xs; }; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const WType& w, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool intr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , WType , ULamType , XMType , XSType , XVType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ { { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param}, // add new members y, g, w, true /* not used */, intr, 2 /* not used */, a0, dev0, dev }, xv }, xm, xs }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, const PathConfigPackType& path_pack) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; auto& sssp = ssp.sub_pack; return elnet_point_t( ssp.intr, sssp.thr, sssp.maxit, sssp.nx, sssp.nlp, sssp.ia, ssp.g, ssp.dev0, sssp.x, ssp.y, ssp.w, pack.xm, pack.xs, sp.xv, sssp.vp, sssp.cl, sssp.ju, sssp.int_param); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point.abs_grad()); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& 
path_pack, PointConfigPackType&& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit( pack.sub_pack, path_pack, point_pack, elnet_point); } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { base_t::process_path_fit(pack.sub_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/decl.hpp0000644000176200001440000000110214157246224024213 0ustar liggesusers#pragma once #include #include namespace glmnetpp { template struct ElnetPathCRTPBase; struct ElnetPathBase; struct ElnetPathGaussianBase; struct ElnetPathBinomialBase; template mode , class ElnetPointPolicy=ElnetPoint > struct ElnetPath; template mode , class SpElnetPointPolicy=SpElnetPoint > struct SpElnetPath; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_cov.hpp0000644000176200001440000001247314157246224026002 0ustar liggesusers#pragma once #include #include namespace glmnetpp { /* * Gaussian covariance method elastic net path-solver. 
*/ template struct ElnetPath< util::glm_type::gaussian, util::mode_type::cov, ElnetPointPolicy> : ElnetPathGaussianBase , ElnetPathCRTPBase< ElnetPath< util::glm_type::gaussian, util::mode_type::cov, ElnetPointPolicy> > { private: using base_t = ElnetPathGaussianBase; using crtp_base_t = ElnetPathCRTPBase< ElnetPath::cov, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const { return sub_pack.path_size(); } sub_pack_t sub_pack; GType& g; }; public: using base_t::process_path_fit; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, GType& g, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , GType , IntType , XType , ULamType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack { // build sub-pack { // build sub-pack {beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, almo, nlp, jerr, setpb_f, int_param}, // add new members xv, rsqo }, // add new members g }; crtp_base_t::fit(pack); } /* * Builds a point-solver using the arguments. * * @param pack object of FitPack of the current class. 
*/ template auto get_elnet_point(const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, pack.g, ssp.x, sp.xv, ssp.vp, ssp.cl, ssp.ju); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType&) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, pack.g); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_poisson_naive.hpp0000644000176200001440000001436214157246224026676 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { template struct SpElnetPath< util::glm_type::poisson, util::mode_type::naive, ElnetPointPolicy> : ElnetPathPoissonBase , ElnetPathCRTPBase< SpElnetPath< util::glm_type::poisson, util::mode_type::naive, ElnetPointPolicy> > { private: using base_t = ElnetPathPoissonBase; using crtp_base_t = ElnetPathCRTPBase< SpElnetPath< util::glm_type::poisson, util::mode_type::naive, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; using typename base_t::state_t; template struct FitPack { using sub_pack_t = typename base_t::template FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , QType , ULamType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType>; using value_t = typename sub_pack_t::value_t; using int_t = typename sub_pack_t::int_t; int_t& err_code() const { return sub_pack.err_code(); } int_t path_size() const 
{ return sub_pack.path_size(); } sub_pack_t sub_pack; const XBType& xb; const XSType& xs; }; public: template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, const YType& y, GType& g, const QType& q, IntType nlam, ValueType flmin, const ULamType& ulam, const XBType& xb, const XSType& xs, ValueType thr, bool intr, IntType maxit, IntType& lmu, A0Type& a0, AOType& ao, IAType& ia, KinType& kin, ValueType& dev0, DevType& dev, ALMType& alm, IntType& nlp, IntType& jerr, SetpbFType setpb_f, const IntParamType& int_param) const { FitPack< ValueType , JUType , VPType , CLType , IntType , XType , YType , GType , QType , ULamType , XBType , XSType , A0Type , AOType , IAType , KinType , DevType , ALMType , SetpbFType , IntParamType> pack{ // build sub-pack { // build sub-pack { beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, lmu, ao, ia, kin, alm, nlp, jerr, setpb_f, int_param }, // add new members y, g, q, intr, a0, dev0, dev }, xb, xs }; crtp_base_t::fit(pack); } template elnet_point_t get_elnet_point( const FitPackType& pack, const PathConfigPackType&) const { auto& sp = pack.sub_pack; auto& ssp = sp.sub_pack; return elnet_point_t( sp.intr, ssp.thr, ssp.maxit, ssp.nx, ssp.nlp, ssp.ia, sp.dev0, ssp.x, sp.y, sp.g, sp.q, pack.xb, pack.xs, ssp.vp, ssp.cl, ssp.ju, ssp.int_param); } template auto initialize_path(const FitPackType& pack) const { return base_t::initialize_path(pack.sub_pack); } template auto initialize_point( IntType m, ValueType&& lmda_curr, const FitPackType& pack, const PathConfigPackType& path_pack, const ElnetPointType& elnet_point) const { return base_t::initialize_point(m, lmda_curr, pack.sub_pack, path_pack, elnet_point); } template state_t process_point_fit( const FitPackType& pack, const PathConfigPackType& path_pack, const PointConfigPackType& point_pack, const ElnetPointType& elnet_point) const { return base_t::process_point_fit(pack.sub_pack, path_pack, point_pack, 
elnet_point); } template void process_path_fit( const FitPackType& pack, const ElnetPointType& elnet_point) const { base_t::process_path_fit(pack.sub_pack, elnet_point); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point.hpp0000644000176200001440000000456614171551160023514 0ustar liggesusers#pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include glmnet/src/glmnetpp/include/glmnetpp_bits/util/0000755000176200001440000000000014341220705021422 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/util/exceptions.hpp0000644000176200001440000000317214171551160024322 0ustar liggesusers#pragma once #include #include namespace glmnetpp { namespace util { // TODO: change interface so that we construct with int and err_code has no args. 
struct elnet_error : std::exception { virtual int err_code(int) const =0; }; struct maxit_reached_error : elnet_error { int err_code(int m) const override { return -m-1; } }; struct max_active_reached_error : elnet_error { int err_code(int m) const override { return -10001-m; } }; struct bad_alloc_error : elnet_error { int err_code(int=0) const override { return 1; } }; struct prob_min_reached_error : elnet_error { prob_min_reached_error(int m) : m_(m) {} int err_code(int=0) const override { return 8001+m_; } private: int m_; }; struct prob_max_reached_error : elnet_error { prob_max_reached_error(int m) : m_(m) {} int err_code(int=0) const override { return 9001+m_; } private: int m_; }; struct below_min_variance_error : elnet_error { int err_code(int m) const override { return -20001-m; } }; struct bnorm_maxit_reached_error : elnet_error { int err_code(int m=0) const override { return 90000; } }; struct non_positive_penalty_error : elnet_error { int err_code(int m=0) const override { return 10000; } }; struct negative_response_error : elnet_error { int err_code(int m=0) const override { return 8888; } }; struct all_excluded_error : elnet_error { int err_code(int m=0) const override { return 7777; } }; struct non_positive_weight_sum_error : elnet_error { int err_code(int m=0) const override { return 9999; } }; } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/iterator/0000755000176200001440000000000014157246224023264 5ustar liggesusersglmnet/src/glmnetpp/include/glmnetpp_bits/util/iterator/one_to_zero_iterator.hpp0000644000176200001440000000315314140040573030221 0ustar liggesusers#pragma once #include #include namespace glmnetpp { namespace util { // forward declaration template struct one_to_zero_iterator; template inline constexpr bool operator==(const one_to_zero_iterator& it1, const one_to_zero_iterator& it2) { return it1.curr_ == it2.curr_; } template inline constexpr bool operator!=(const one_to_zero_iterator& it1, 
const one_to_zero_iterator& it2) { return it1.curr_ != it2.curr_; } template struct one_to_zero_iterator { using difference_type = int32_t; using value_type = IntType; using pointer = value_type*; using reference = value_type&; using iterator_category = std::bidirectional_iterator_tag; one_to_zero_iterator(const value_type* begin) : curr_(begin) {} one_to_zero_iterator& operator++() { ++curr_; return *this; } one_to_zero_iterator& operator--() { --curr_; return *this; } one_to_zero_iterator operator++(int) { auto tmp = *this; ++curr_; return tmp; } one_to_zero_iterator operator--(int) { auto tmp = *this; --curr_; return tmp; } value_type operator*() { return *curr_ - 1; } friend constexpr bool operator==<>(const one_to_zero_iterator&, const one_to_zero_iterator&); friend constexpr bool operator!=<>(const one_to_zero_iterator&, const one_to_zero_iterator&); private: const value_type* curr_; }; } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/iterator/counting_iterator.hpp0000644000176200001440000000314314157246224027535 0ustar liggesusers#pragma once #include #include namespace glmnetpp { namespace util { // forward declaration template struct counting_iterator; template inline constexpr bool operator==(const counting_iterator& it1, const counting_iterator& it2) { return it1.curr_ == it2.curr_; } template inline constexpr bool operator!=(const counting_iterator& it1, const counting_iterator& it2) { return it1.curr_ != it2.curr_; } template struct counting_iterator { using difference_type = int32_t; using value_type = IntType; using pointer = value_type*; using reference = IntType&; using iterator_category = std::bidirectional_iterator_tag; constexpr counting_iterator(value_type begin) : curr_(begin) {} constexpr counting_iterator& operator++() { ++curr_; return *this; } constexpr counting_iterator& operator--() { --curr_; return *this; } constexpr counting_iterator operator++(int) { auto tmp = *this; ++curr_; return tmp; } 
constexpr counting_iterator operator--(int) { auto tmp = *this; --curr_; return tmp; } constexpr reference operator*() { return curr_; } friend constexpr bool operator==<>(const counting_iterator&, const counting_iterator&); friend constexpr bool operator!=<>(const counting_iterator&, const counting_iterator&); private: value_type curr_; }; } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/type_traits.hpp0000644000176200001440000000367414140040573024514 0ustar liggesusers#pragma once #include #include #include namespace glmnetpp { namespace util { namespace details { // Dummy class that defines a conversion from F to this class. template