kernlab/NAMESPACE

useDynLib("kernlab", .registration = TRUE)

import("methods")

importFrom("stats", "coef", "delete.response", "fitted", "kmeans", "median",
           "model.extract", "model.matrix", "na.action", "na.omit",
           "predict", "quantile", "rnorm", "runif", "sd", "terms", "var")
importFrom("graphics", "axis", "filled.contour", "plot", "points", "title")
importFrom("grDevices", "hcl")

export(
       ## kernel functions
       "rbfdot", "laplacedot", "besseldot", "polydot", "tanhdot",
       "vanilladot", "anovadot", "splinedot", "stringdot",
       "kernelMatrix", "kernelMult", "kernelPol", "kernelFast",
       "as.kernelMatrix",
       ## High level functions
       "kmmd", "kpca", "kcca", "kha", "specc", "kkmeans", "ksvm",
       "rvm", "gausspr", "ranking", "csi", "lssvm", "kqr",
       ## Utility functions
       "ipop", "inchol", "couple", "sigest",
       ## Accessor functions
       ## VM
       "type", "prior", "alpha", "alphaindex", "kernelf", "kpar",
       "param", "scaling", "xmatrix", "ymatrix", "lev", "kcall",
       "error", "cross", "SVindex", "nSV", "RVindex", "prob.model",
       "b", "obj",
       ## kpca
       "rotated", "eig", "pcv",
       ## ipop
       "primal", "dual", "how",
       ## kcca
       "kcor", "xcoef", "ycoef",
       ## "xvar",
       ## "yvar",
       ## specc
       "size", "centers", "withinss",
       ## rvm
       "mlike", "nvar",
       ## ranking
       "convergence", "edgegraph",
       ## onlearn
       "onlearn", "inlearn", "buffer", "rho",
       ## kfa
       "kfa",
       ## inc.chol
       "pivots", "diagresidues", "maxresiduals",
       ## csi
       "R", "Q", "truegain", "predgain",
       ## kmmd
       "H0", "AsympH0", "Radbound", "Asymbound", "mmdstats"
       )

exportMethods("coef", "fitted", "plot", "predict", "show")

exportClasses("ksvm", "kmmd", "rvm", "ipop", "gausspr", "lssvm", "kpca",
              "kha", "kcca", "kernel", "rbfkernel", "laplacekernel",
              "besselkernel", "tanhkernel", "polykernel", "fourierkernel",
              "vanillakernel", "anovakernel", "splinekernel",
              "stringkernel", "specc", "ranking", "inchol", "onlearn",
              "kfa", "csi", "kqr", "kernelMatrix", "kfunction")
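The NAMESPACE above is the package's entire exported surface: kernel
generators such as rbfdot() feed the high-level trainers such as ksvm(),
and the fitted objects are inspected through the exported accessors and
S4 methods. The following is a minimal orientation sketch using only
names that appear in the export lists above; the simulated data and the
parameter values (sigma, C, cross) are illustrative assumptions, not
values taken from the package.

library(kernlab)

## toy two-class data (illustrative only)
set.seed(1)
x <- rbind(matrix(rnorm(60, mean = 0), ncol = 2),
           matrix(rnorm(60, mean = 3), ncol = 2))
y <- factor(rep(c("neg", "pos"), each = 30))

## rbfdot() builds a Gaussian kernel object; ksvm() is the exported
## high-level SVM trainer (both appear in export() above)
rbf   <- rbfdot(sigma = 0.5)
model <- ksvm(x, y, kernel = rbf, C = 1, cross = 3)

## exported accessors and S4 methods from the NAMESPACE
nSV(model)                 # number of support vectors
alphaindex(model)          # indices of the support vectors
cross(model)               # 3-fold cross-validation error
predict(model, x[1:5, ])   # predict() is an exported S4 method

## kernelMatrix() evaluates the kernel on raw data directly
K <- kernelMatrix(rbf, x[1:4, ])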
kernlab/data/income.rda
[binary file: xz-compressed RData; raw bytes omitted]

kernlab/data/musk.rda
[binary file: xz-compressed RData; raw bytes omitted]

kernlab/data/promotergene.rda
[binary file: bzip2-compressed RData; raw bytes omitted]
^J:|Ek!Cc+o|@[e_chR_hrnrAECJP"!`I(fVD6-mp}£r #w:u;*\] PlJƩO[NWZ.G2?i4n4vX2'CB%AksW{+՟!AjR:>#FV !`CjlNvKH޽[Y!g|\ f.I}zm[K 7*2}f0m,r3ϩlqGbo}c!Sl?xn8>s<54 4'U}]6xP) oTb5g;Q!( Nw_w<]'{W_޳sCDICJR5LAowc[<6N?h[MHk߫~չ[d-#!@HE.EIc(lE,`T'0w%>ϐFn7;iI3)FXVGZ6r &k)"9tqqL;cDZj,iFMݸB"&}~؞u6A1!p|N< '`6$X$ "+gܟnݘxOω/%SA;W'ruZjՂ`h jG?=ш~)]ǬE(BPFlLDCގb#b *4 o/ד[v"0mq!,wvb $BƐBP3\ "ɌlBQ K @cFDfyDK}+kј؞H, bW=fdCtfJ3HEJ$D4I(4 [d%kc3`,Aid i>䖌(0I$F#0b0I$fdfA%'I_ $)"1!J2IEWbLȚ($@TYD! |nNI7|z|:ިg^ߺ0HI9" k?]3k2^{F`ADlLhS~ҌPLD`PHL1$L`z8E02F`l)L$iSS(}ʒ0 J &),$2QX։i 1M!Ce=]cdE `LD&󸙄ȚdF,&*$k@IF 1bIL21Jjc$% ޻ɦHM4XmI%Ai0TdFĄIdQ%XK3 Z"1D 9đK10E3)JM ”cvfL$L ι0 #4h](Y DeFwsᛦъ ؂FR,&(i=uƍnwsc$T$eF2 \LhW2=޸J#%!YH*q%D)-h!LzwL2&lQi=eu!웲-RM4J(#24RjDi@3I(F|x4FM1(!M֝hiME1-3K$PMn豨ҔBbI$$b $-I0X̴klj6JPGv j2IIDHi4D &-4j,b6-$3H*1I{wZmm-+&DB|L*_`~EvD e %-D1HDTĆ@%9f!|CB!/?:/*7{/W-4'F kEEɢ EF"U6MQcYfc`$E2ƚD0D "?D.&XA{?>e0>[t' [ J>`Œ+DSBQ..C6C%a;yDbR'@7RDR@@ȕG(*ʅ *4*(P(ЁB#2hqr:8W˜e1H$XHV$Td8PVITùI@mЗ(fg>K@-49!wFmvQXIvhrS!J $M E,&E]1i0 >^bJf%+AMڡւ`[X*M!q9Vmʊ 뫱ѬB H!ڜ*F P_x-dWLuFۮʅ9>Gud&G*@#8a# Y;93 DXx@Z0L iXCVXEG8Yed`ml8r _ XMX1I( ݮL~(" yl#@H "!TB&X T[*o&/VQMe>PQ OEEh0I:]mI B4]$CьPp,F!W]hM ED$@OK$KV_Y sێ+n;_YH%dݽ]YLR=/S))k⇤Z&ۅ[^^1t*}I~ M ^48ߓJjQJv֜A|zbR s8G?V^Ks=[[-_ÎB8F@' G;2;nc^7Ve>z|ϙ:_ksxK>P #Bc!EnַoZKe2 ާ'M_y"@0RrZ uu(c ?5z\bi f̅[ ELѱif5Iclk2ZűEFi11FHTO d dXLG%V]ԋV瘋^Gv $`?-)yP}*CF NP5JҎ C~q|Xպg1uݿgQL⑲#^IMp[??.P#w_0osJG9:ByA8lj8x @deGBD{@> #` RFVVE@ZT(DmkEUTh"z~"iFWǠ#D#@Yy͋. E,Ф_nc B.ol-&*olUΆJݣȣ (F?HvQ7C2!]7<,n krnJDiD7,/$RMf ϟՕA记ѷT-U aBҌr`Z:$#zP4]r hR74*BÅ O.4#DX4pj$?Kxt}vW ͓UXbhUFkBT8LaY *ᇼ tCNaY qv>{jCmb"U:`V$E$ϯY۰"Q$3 m Cu, 2aB=vN!dXD]M7evoUrAb6C%Gg "קr95Y{o2FUfMCNMk'2l\ny|Trb {nǟT'<Vƭ{ZՍXe=c@(1+oI \P Yi6-4 X61T[Jk]w2F4i6{5T;YCp)( ȠscYd5Ur='5o{w_M\DAEKG >ŸxmPJ/<~sp$OiQ!52Tlh%J611|"s%y֊.-|*lX h*%@]ԢB0ߤ>j(ϵkee>DQ?qt^H7I,*aeTgUpupm Xh)+)Pd`"bE XPAŒLVe"1BCid$fPĆfȐYecIa Vai(F"! Z"je`G!c0 }TO{Ofb2OgBgTBiѫH>ܰC;^mԁ` ITJS$X=5u}ߞuQ=.wmzlcG_ XRQ5`ICV#`#rjq#6FӘP [#}MZ7CN $'^_f#oe)'3D]rHh;8d~1X`un BԺS<;Ȱ>_;i}e`Y`{, /YO(n$:]oɽw҂D$++D_]Ѧ3RtH t( C9Hb~0̱ιt<&sq ,O] N$$69Nc}G)u~+c,݀erpE5k9h (!a* %<.F{^*]>9]iut:<ƾ}=ƭ^e?K6P0@T\'ǚ2Zwy7K/%w2[l:Ϡ}_32%AQh~ N HQ EJ". #R 1!%iCgZ%;I!7€`0 a l / <X91D<ʈ%0e0`$Qws.Anvi]l)aF.OpDa $OjZb !2V+ O 0Ae ND$UT27&Q`IX UX m, גm,%n8''$^TY*T_R혐4E%X3E$ gwR/|UR<##t$)oĮ$IurGEB8؃,v .d,%H2UYhgo.NzmqhLVs;w*:7(ZksK35H~e?FE Et;UY .pT6ZSRAB毫D`?5>fܤ bix稯♄1M&rޔƄ̳:6d=7;x!1"w_ AHDX° |85tRؗu;Yy JU!Mq~#R벟!R@ J!HeGs ?3QmhWM6c:ЌAK EƖ4"LlwultjW:,2Z,m_v'$ݣ8\z l{Ut=ӵEvtGc@F$֦=^z8wwEL]czYޅd9WN ` |ܞ{o~'SAf6j#4 (ZW-qZB8>#輜!H݆!3)d cFvI[8GrƟpyvW !CFլ-s2o]ܛ e$m$G#wupQI,y?)'\ l漕ohy&.hoG:8FbcHP"ptZ\\}c,.sh4={HchQ+hr`fWCJ݁Gb/CG߭*]mQ6;spBl"4z ' :S~d'{G5T˸)4;Ct2 <*r&GfIV+W>, ;TBj*ږ;1-ڰxW*ct!= "aQfͥI:$ k^]ۈc&Ek~poZ=roUXzn)ױ*ǘ^[6Ovit"w ("P2͓|7L3 rN(ddt!Jd5hӀ6EU8 )T A8ɂ<n(ͺlQpܑusM܅ ڋڄ҅20kp~p7<(C-8)sr.(>GvJQ\ߋY1[lqMp {ٿy,x',QVRPc!?4EP :A!c|wiP啖A+,Qi@9A<cS;;-VfbhZi$IEvxL)Dv6{Oc\6D`|{~;nyki#^spA@ 2& 8) !Nx {*k't~7O?qXGʭ7:Tsp W]MHh?>zkfԵ'in:LD4=;-Gƚ&|_ְc\Q5̏ zkǞH_&H-'Z3?^z=xFڔ8wdsrr!љwi@'G-x5F]^_}Wz5ߏ2ͽZ~乻HN=aѳz|^e ӷ/꪿+"ǔjL'U)լX^25: f] y,K&UyP)r)d/JՀTfWL)?m%ӏW1v^Nvi;a=%ff^c\4Ϟ[>.96")=1.PrMf<1aKgek w#"xB +XvI[<7^Vpl´j$Wtc]LE3 $q 20]lKئ6{<(yh9oʽ^Ǧ:-~sWqOø*+hsXKL{t$|u:SXzLRnl+۽ ebٲ3A{B 庬Qn^sE7Y`Ҩ'}?<:YB|V~ ])¬"L*d۸78 [_MâѠ9o?ڏ~Mqߑ}>uX:)ǻἿ~{yzK7)(`{O'\HxOAT, >r?DsMRå+(!,1Io$,zYσsF|PœPuśgo!]5qyIH#!REYjc5UYަ$C)5Ό!|e݃$,!wh!{[lzpM(50m#"+7l2s;cp%YS$FNZg87͢Ǖ@Ҳ@z} ;Kk{f|<\ˆi.i`yz!ƹơ&96>mowCOV$i9;c$]/NDF@,x/ZRpX׍(`QdU ko/0.}~)I!5\g]PkQbI [B Q[ vc)!R\EH0!0$X MLoiDu-1T-{J#1ũcY.ImcM<&0]B:ˈMZZkݷb3b3 \phDh^LL~!t?I'Hp@,$$:M~: a 5{O>ڻk:tx> w px?? 
_˱*%THYb%kWUf&2PxH-I6O4UzD5Ѯp\z藿PEt駅}C =a9uW\>gepq]m`ʎp{WZ5nՠoº-vþ >[(t(BE@W!\Cy|…(yЈ[Ek?gę9rzY,֏9pmSl3@:":0֎Uwm m4!y,g~Fe]^r*QbIlK,Rd5TRC0LrFiѳx(uү ]`F@3C>]|ЯK(Bq049%F6h1BdVF_OTۼg{/v 5}BT/9X bXʾ-eREѽ̀A"bXৱo/ULCz8 ;TJ!YHe%RtV 7Ҥ ]5WC)fHV5 1bˌ>ҷa'[A@f7QHJ&ŵHsB-,[`L},t) _U/c/s(8m1t";\e\9 B - )@"d@ UFmkQUjV6ت1*6bk굽W*4ov61{  ͳ=\a\ T>,eX$ϓP3p$PEAJcHIM2ƢƖDVg7+dzDZp{L׳6pi1 ~޻6t{?mtZuPM##hQo>Y̬},ʼuZ 3Dn_";h!i &S4 H0) WieA,VW{_qG:~kw }T̯ oE8O2}O#N@O{ٓA]d3x/q޽pćPxYÆg#<)Ư۹jҚo@閶{Y !!Jqlot c(?)< ejg6ĸ&{F3SMK4'Ď_ʢƦ#7a4IiĒ"S!c("N󠚡6RٞPL#T`L1& c|]( 7\e)uJ$agN:ܲ 0!袊TTS,S@B%$Dԅ3[&,F-65EcZ4"KDVKchTF؋Q+lS6lڢhd5BP(\OhiG韯0_팢( @m$XAY#0I$2ՐH]6N )U1 6~;Wuk} u֐ RU8ωNWJ\(x++u洜~ .,w`,b/VfIk@pAN,ep^hsiҺCT#O~w]blCH@w?`Zl}s?3u(P #89cE@<6\nPNWZ¡‰|^IE ~-ft;;f&mg ^ 0&Y*4j%e5JA`\ [W=z9?gV1p yTFQXfƢI0tPBq?UOA$It57dPk} ~)H<iv jȤh+!Lk/5wUN_`+ C] 3$IZˬebe=|\fYc^z7y99 F1k'CĔ J# uI9%Xb0,^di4x&͢S(dr_[?yq-YHr)9à*5!WĈ&a31k5(`,tB5Kԇ(d,5;:@Mmc;>"YU:A0Kдh ?9\l0ǿ~4 ܱͯ/) E\@\Bs.W Y'.LhtV,Hx(2 EznMξM7hp.@D J$+@rHAp\nJD+R?e^fϱm\x:DJ7aiK,h~yDP!BNeS6]y=n9zy[mP*>D=]|1T芉O!?ǁe$?UCAXle$A&4 4Uݗkad$RDL(vQ;?.d줕o,Vxk\g@օ ^pTwM0&0D@碹Q!Ct>}Nz3ML4Y@RRJ ) ߡL0XX=j&~=]SN'X#) a/__ƿ]U_X'էQZwͷ@|tDD~쀋q9/©i~טLX|aMjsA|?$OJFDKw3 P]!OPflT("YBСzY8%4&|oYƗT=7̨Qu5WY֥z VxN/f'K/0xgZ,a[gÐY<qPv+%E71"1&G*$sƎ`;[k68ʭl\&#yt}xbݎ^]_s$dحdU*#ɎL%R(XP4O>DMaTDI]!&TC  VK)2%,XxiQ1Cn& M 4II %ky1*2_URk7{킅q-/Z|LhzHEQbMj'a H biJ %QdPJZں??L'J(pE$@21P+MЭ?7?,[eڧMЄGY`A`8ޛAW)pj,sϕ+d i~9jD;>2h#h5D % p ,CR jRSAD*Q$> dXg]/yc[M:-n]R ^CP?3@S2"c~'4bg46Ho$?Ob;m0њC(8EwKQ4xVG ܖ:oJ [tMv+HkUI%|t(RL_z@LiZQOu9Ro$Tq xU,C*E+yőKmօb?SuFG$~[cˀ9_xJJ~=t xGWe":sO Xr(էѨaP*hdؙ>cR&d8Q=hHCۢ")YiIT־\ 8 zⷼ|_ O_ռa׶=ƬceXϴ{/k~o<}VCt {y`0Nr "$uEsڨQ+ۦ؄!hqGLd8(ERΩP:5kڮLB ;'+'F)"bRO=5v{a {fF@'qG 2(G%J# FwC/NA!0qFd4=]kgZ$4!JpmjӪcዙCG4B Qb]]+%'F=QSyB~C!~ $C考T lfhco (C|j ukA8'(-oVKaY>]#,>cG8QA;h +•]n$9+Sjt^~5d:z뿗18\?o}.t@ǚ[8=-';㻮#w~( e(ʲ;@bۥj ŝ$=4aDP4W P3YƷCld_&mPeQ5( ꮄɳ&sم^Ohn/Nq1\y$O#0D J\Ց֥wks=?l{Lf`<~sƽ<zO56{ogr=]\7|B`xV&J-,۶md@`\Goi~1|=eYuz<V0 3FvĜ3Tɡ[RXEaC8N)|P+QdH]\& )ZC n.@;\w,ϡ{DN%8cxD &!J+ sksg)(3Z%8d%_!Ge@*_D~,w[B5swAi;jd4 %R aD$BT9@U ((fBPe??F~b(#P1 ?b"K4,ÓȻ Vu_,t+ڝ//}z];7"f>FWj]ȥȯ51Idy,YGe@r4MKi ($ ]dwTBQ>. -i:֓h PUh]V4(9|1+)AArڦ:K}:wFJ.%Q!h\?./! $$x8T41jG9' ZN=l4T{XˣIFq*]LYLduu39>Mr7CG΄}U;1^{)®CNj,y+GׯyvzK +|m֟]Eh)9+8#u;p@3OBj4dy*An ":,V&s#JRЎd|7CMW M& *r"R"Ҡ(QN͈+G],jhUĪ%"Y*lOWAbN?e} :|mfXX!LP'\t K xxWA3ot}GHT!pp T>]/yo{|~_нVj:/}shwc=y'$ 8I}PxGD~0r'uYe|"tB%RA QT䋠@(ېz`@ DB&~mJ#͝#?$+@P} Ne(!Q G||dM4 X"ƴnnn8<ο{ۀ߈ACBQs򙡪DS쀠)~v1<#91qH iO}*l`,*0CU:c5O)# D8j(~*Ɯ'UGoOp!'[UyZuۡe߆o4Q" +EE&A#> (AR v޵Vtk2H(7H*ǚS7PrlUcUJ8(PdԨfǒ˽`SKȢX9]Ԭ F  Ne#dH26vfʠ?`[*\(!oS=#b4*|xɸZ=sv3m c}QBz42p Y^r>_nǢ+`t[td kC 65uf6P_S*YR$'EPK~?Ykk6z]yN ' `T]{:uxƙoWs#lgtL"!HBw'a^ B+0*h{u_XqӃ a8@_Q,l'FKNz,v;9tkg !(\aD>2`Qߟ[V`n̫4DvĻגew)c CyX ;,vm''̚i((!DPEHE@;nˡ2զ=mws_)i> 1 .w{_߰o}.oEg68 l^(C7 i-8Կj8KE )1$~Iv2hڌO۬"GwM!7lIRTC *y?)gܳqg0c$_tl5h@bDMeP͊/F($HE%!T>cIbR_p$k#_uQ(|!%5qP6uQ" AwlPj v;'M`kuQ[)Ft7 R=XB b HEBDClac\ee䠱߲@X$m@y.DkMfV6v}G6UOhzf =/F#a<,P8 ^&8͛TVYR-Qb!([! 
q snBrrw[3woM9APDD8P+Aq* (!PU@A@F#_Y?1Z40 n j!q.J|v)}sU p{O ;;@h05LT aTOY?/dS]a6?*2`\FR,?bx2 0P$dl\qzQ+Ʈ7Z"Go_v"Ī*';ECl?:mE<$* P_76Y^t:Tg:![Lo;+-$ $xJ y34w~z~р }e;1DxA~CK}'GE0J8w9`/{djeeM6;\Yr].ַ3:J@L|*_X Ccf?Q(b2%j!$ ʢAK2>jB(4U-dXBIKƪH]aYƎ+'[EF&E!t"kC2%9],U` FdF4 JHD7["ȀeTDƫ7rppTE$Q\lRHk de$IxMD&$rj ^ pMr1J!D0I J"3J@3ao $V;AEЮrZ4 / g.a{D2PguN|%2icZ5]TU'VcVû ʋ~&N񬟟^j$y>wQte9{Qܨn3juuo'قgG5{]=֨ +nBG&b kbnB)bF9{"iB+)P󓽒K1(QtbPYH%+)]v$gh-T s  q!@UDyD}E?Ui@5p~d3)\Gnyn2\߈@DQ8pA%a!JPGA_Hk@נy) )[_( R6m EkFYeiW?{~9&X¿}§g?Qb7iyc *p )`(aPʣutA=|x`lKfe `@IaB@#R$[FzSSn3|?3yiowJ 1d"p<,"s>q; 8JbH?JCɊL_B[n|OxvP"v]3y’YM2JERI6؈ R(b!&kj޽L="*r&].c/[>`CTeڄ1{t@ |>-((@TRRFM8ZԬ^ UYNpWvѳ L,ӻ#)?Ac֪+mC$T,jCfb̮RvbBikOa*uVdwnoZ(rYجDZáHd*ᠨfku9m2ɫl|e+ICh |Wgl B}SBaQ@7ݔhV4ӹZnhJɢ"ptŖC"ѯʿ5( T8okZ"'`map1 ibTRcs2ztty를 [,Ѵ=LM"B~3Y Udc5Fo3֚+)l;<8p  nL|y_]A4IBp"/=Jf/=, | gf]Y:AaZa%0\ f_˽Q8 Yis ezJ۸"6TSL( (zRA]/oDqx_磻nGH$S6"RG <bl?elgtƖ2gʇ֑bZHh*Q| 0@\ 0DG@?rE8P$kernlab/data/ticdata.rda0000644000175100001440000056755413562451350014752 0ustar hornikusers7zXZi"6!X>])TW"nRʟxq5(БtTR9* ][QodRuqH*֏0rinߥcx=8KlNET.7XI![;)oH\z:*H|W !P#7∮cD8{(zPN䁹 *,{>u+ޤy$+n Ns nA`lkr{#XB =7[lE [IfsiQ0d܃0o]ap lҔ%"wLeFZ)בP@u TDa!j<@"^W٫=0۹ZE= ^n/_@:Qs *p!w##  /_bY:݁SdBBz*iNFSEX\Ӧ L_A=P9R7\D7[ޚ y؎/{9¨8?E##Uk$?eM.V>=3ՋOPGɫD:ktQy>4^s!qm~ۡYn;U#`u2.tVwO7|AtT.-Bi{OSmգ."NG1; K}< cx"|'\#b"%;眑D32 ^IM+JWi/2'3E, Z0 (%eٚqtָx]'hF ՁJׇ(ݻ0~!q- dHeEe"r~ΗKl.DrݨzfHUGN^;nCUMK_*:hT4HZpY*YB &a@z4,&'?JNv |EA7Q L[X-% 8x\oj+h 3҈ߧǂ1hTlL'3{-KLDJ1S2Iq0WB;L"WYJ6RbT|V᫏WXHF 0)a#CBEF: Ib]8?Ħ{v 4򏁟{jiIv3]@|}Jx~M3Ж;yon.ӓFTmH $NB ͤ >HC-x,*l14i#ǏNzs9Q.,Z:0"N;(FR:Q-feg`{kgjtug&xz\XZL~l  $;gRO-q01gokkDQmѠ#NTry8U3Iy[̱ ŜӁ{w7f:LߞV%^ acL\xvBۍKa=UtNeóJ,!#Na_YjTkJFoK ,_]ȁ)kP+xW{s)"v(KS1BF81<ْ%*IFEQ 4E1!oh0tN|$yJQ!Kڹ|&wQO^\7ThN((9`dUc_w~ bG4rgR`kPX/4%X6ϝ~Sa6Qb ѫ`ɢm!"InZLOj4ˌ2AQkk!2/%T?< ݊=C" ܅#QRZ%SbB zuAOfI9az_d|A&WU=|&xThIW/dpx,8Ҥ-]NG:ߤCmVIOJ1yPrw| |ՙ!h.~Ma`wEYKn{3[_Fw މCgn,{"w.s=FhiF[5t2aU҃U;ҌzebQ) jq!z ΢܁ ]&:Կ'~>$UK%`"gQ/u_*{Ls%Rޥ '$֤34}{9IbN6y=1~+M pVLQJF^F%bh@O>lxvH1!*Qwg΅AJJ{LQ4%/.I/|FSZZQq W%ݵ}3rRa :O QR& J_+?eibȵN^/Q'ߌ<Qf krQ1ԯ֠[8-ctfZC"*`bsq zޡiQݞ+ /Sk`B]g9$G Q`y{ǾWtPWP"בa[:z"]c eQlʄ ^8q:oPsd5!AP?BľskVppPMD#_G`-& cI@S_ۄT( \6f ~o:2cNW6AXz CJn\e [ԹtBy`*r.e f-oo>&Y`5RwTz5Sѱa1 4q;>; ĶL3b]&^"; +1mѹ87*0hÉY6y{j%۲'Y~È5 ip \أncP kMw<֓m@j"/ W2.VWaUv%ݏSIiΑŚE~ץ[ .;^!}A%PI S|vgfh="ѷ~Q  c? @ uKnj|o1ޛ(?7N]w$5 Taz\! D;Y=9"[LôqihTlϗ(@aG jH xdz7csWlD]tv>಼+Ɲ{AA-U![Y&Rk@)Puy?zo7J++ey5-ꏿ[T-t'ZS(Z-U/L3DISNZ"tj*Hy͖; lhQ u'`^3 GEB$1+`5Ng«Zwȵ>C~+=u D2nE<3jGzX1Mc-bԴr0ck`VV5dɧ73rΊ|3{*^Am[琠ϾHNCpv˪@No x`m-[=e@K 7(qNh0#݃oʈȲ&l;+κYʞ{+^y}`y fm~k/Bx$?_A# t0(`hYU37Vw:< z)(j&2RQv oؘ즊d(3ie ڡ&+0CEWf`0oKw˦xK;زC!.$ӯ$.Ãa~1_ h#טn<IU8S).ta9R"%ՖWb-tY׈j| uckB\Y6JL2:_"1=,"zG!Te|~7 !kk;IxGHƏɴLF݋x4*NB2b۞z蒖V?IȼG:ʅط@G 0{w߄=}&%~1o!u&* d&z\CX{>| Z_Y&#.Ck.7c#x8@A &tlL6҈`MHSEZbu͆cH|1*rd(šQ Mw Ӻ|\!a 2R)#,9^-T)\#k=Q)p/K8yZl5^}O9hs*ʺ;.sى\&0+OE99P8}'0tS*in%Ө 8pK)ҷ4'ӢMO.7w8NM|ۡUZw2K8-%lvDc-~z7q2:DSǯE6\^F|^oťvkaIbhF!?Q0S-ۮFbsvfjdMyG_So DxJh$$a8E@{Zr4D``p Ā6߮GC)aRN`c5cuulCY8bnϼGE`UX<)!RvUxކxzO!f/)r-V0 D&) z32~Ʊ8>Q{$[^\Yu=J5K_n Zcj[<>ͱ׀&ZeVSp3o7a; 31"mEr;UOs4Yj}:Bѫr&\5z9NTp;qNj)zq$x7SׅR6i5 HGZPP`(n> 8QfљV#}@ Ɲ%f)JEx pwͦ %M!ڰLԸ$c6v{@$Ce7sOâ| oJ?EtXˣo7~4ۥeș0!TMG8'`TRM/- oIq"2ƸefAtAZg^҅\dzșQ*7wf)[^T.5^]?yjyIz>$9^ > ֜`QeWt.L4(I`uFKe&|MVqհhq@ Yrl}<9B*7y[k4ϗE8t8POjzVZ':ٳWoqbGb$0W`a+MTg|q]lC&=>֚h<%ՊhL'DK2b}'#H]&gEIWznPe+Xa8K 8߅#XUpE@%YK!t鐲 [`;N̹z \ܒ-3Xg 'ѮsAjCN?{i)֓';3X|/"F4Ω]7}o+Xf=0zp\z|$fOZ7]@sj/oN OC4mԑ=[ ~HNacbg/a(/ɗ3pj3(c"/Jy2La3fMxpOgQKhS}BAE\TYH.Aﰚ6 ׼ެx*RRLQRycKV$~ >)*a՝&g>'{Eq& GA%W) !r`h>K<_-XۂxȻwH_EmI^-vߌXu=3qCE'ɫ:YQ+aq׌c'gL_%Jie8pV'/! 
M}ktACadթ4ҾܙOK"/1G£u}ɋ$Sܖ(72G쵙`J恵a~ɶi-27e GA D9D!AE8Q~*QhE#_Uoހ8dVg΄;q`e|I4~o)xW训Y{/=&G};!S' I҆;Y4žq 'ix#Aw p-sZ {l5EsYs6bUvoEK ]ذFKlפh=;{^gdݲ%:J4I  ]^h %>->ˈh.h8C}qnLDJMx}~Bk0S1mFͩ@pdʺR|e]%iub lA8MPjR^^0S^{= 9~^6OxMs_` jD{;=h^>  nMszBﻘd-wM3:b^ij/0DW{;DmΒ|gM tWbNW`'k `B&,__[cal+ q/O܅ɴX0M *273manb1(#B`(ytUtʇ,cwqec`ęgǖ̙ԛ'9sQ80+dhB10T8rdf`aOMT޳CXaqXFjeW{pk&.qAo@Y 7ds-p %xLܬ<۳rܲZZe.qa3Vd Ʃ=^nN$[ayCyO rbm|j[4#*(-&I/f}+hik=]ꙭԏw$ DY f$z oNqʹ[0~5:i8w6y:y_{T.#u'T( #dyZܢVYT. W{H?lNbHmV!p0yKo!5b:!-)Ƿj,Aah)~8q pĹzZ(Cb} ŀ@?"U)p+"^'ZԧgwRP芸Mui]-M' H<Ѥ9n qDt߈뻋^ГpY0p3V~u pEv$|`4mEuU0r){k8'nZ=KHH F) .0-u3-JQ@8n\!{5ﰼcH27fC x4m6Z7px [ڀT3qS&g[vp̡nc+Ij}*j8ZSRHo:;j|Q{En܀vL&1|3"G[x7E>ypD69ut/o~j[xuԍhA6vrF!/n^&F90ʛ] w_uJ?9` ZMvWeV#/w(0|+g[B,KSCݬdxjffr p]ZU)÷aO;}O+_o7sEV L\Mڍקq4J|uGi2|$nj>dQǒMΟȃ a Z<<OfWEK;#,̺\O&jq՗'Kz [)EPrFaxzz9H4B1:g.ZUFvv86Ϫ2@s0 slԂdWnW!AɜQ F^& sօn AoBLT7jrmwm #[ )QBՑD v!)nKQGQE%D;Wc9>[Rd?HT0<[WmKa6<2psmt*jx18̀5ĬL뺛4BsF!!43x>1"~Г"$|θK;>a[@ޘ_e36N[zX|V cy浣*fK8yȴч#*&c {ңp|p @iαfg< ">-*cΗ:J 5eFY(ďz)IRmz` $!oT~a5"Ï;|+Y'Ħr[Zr`lm#$$,o"<$mu3Wy~iZ䑋Eze|>'y-\Hv_V+ 8$)臚%0_bu4Yϣ\FH"DMu4\ Ш[X=2c6ѓvR9p+Yf&M6ZB@1{,*/twCMX듺% m;Do8uhCZb(QrI=V~q 73 D#9:.rU=y2U?po2 qm.sUێ$b8ozyS$%?&==~*fpsh7!o'bU . T\;xF4=_ZCPf|"q.֧TOWAr$k9i+ft>¶,WxEQ5$Th~|LA~ez= FuQy8!~^GX$Z3#-u";ܸ]@c,>;\=2$/mMnmB_;ݛПiȦ45]0?j6[#ם 9_Jb;wC&p$LTKzaW?}N Zs؊,x;@SCzxXfOT?QSaW~M$]|ֻ ǃ?h|DIkVKF'N@Ӻ0=qqXpIy|Ga`~ΟRQe*ilLDWx=!)ʔ]_%]=gF+;&,M0O*p#]+ f,(4W-yBܮ``xT`K'$՜ f?Y/3Jy4ʪ \Rb3pFS UJ *μ1EZVH03/l ʦUGۢ~Img:ݰ m{ 9΂ e'lӟSOjwt$vZؼ'q)Nk e,[;E֢2ҢOyLk/br ~GZ9z1&[,pǠʟ1| ƫ=%5: t .;Ȫ MĄftsEH +\aU3B&JT|FpJki(cWI d.MÊ^r!6IsydZj>\QD}l8A@->тq1!gcnp$&N:BU};X4xÞOư4 @#+ϊ7k|塢 f5:bb*m*0(Bxf}?aJKsr}»)XA[cLFϺODz0+&s4)0݉< @'jQ>y.c˿Mȣ7pW_6aTWǛq8#O<ȜiUeHTh(R#nbLfy0R0jLm_ͽԉ)~qhDwY UwbK'EzH(<@XMb'ݱnSU))=$6m=6Xl/TVs*vJkBP"0@Dm^X˃}RJ^pLw`DvܯN(iF yi{׮w朙s{s qO.#[)*X|_˛*v = #{due&W~ߢ Y"QĪ'\iN'umT3As oea%shqmyր$tJy[.vxD8̬85ݭ@{)GO$׆BK6Zn.gKy?.=)#\. AH縘 -&G_q:͖V!$R#*cDAWlbDgi{֚ sxя$.m.31lZܥID-٬XTn`ɵtjiJv׳wcs18lboȪi֍؅z9ڧϡ(K ?uu_g¹J/yE2e C6hm"@VuU[8˭4gfV!Vrtp\+~/p#ބ;eZ(<X끀@ wuIVNƵalwl:/䑳kAxao fa,k >Fʅ%D`>+ ὜Zl~ґ` O/F-xf'a2з+6ƶ]3RF @SSdX .NHQmW q}rż:-e.AV 7^$Y(VIKl0 o  %Qŭps Iֽ(Q11kޗ"mY|d ށAݎ~R.[ r /b2^Fxg'qO]!!.hB 䏳&e#'K Z(:1``GqZa|N#+s=zQ&1ݺRejCGV}Vvlv'`u-|?Sx{*4W|s>M ARC?⏍8R\+@t_orfq&j# saCScұgНx'eծg]9ɹg(Szl4H-l3>O0bG <-='@8qsAo-ΪQK>l_4^M[`k^ioe gߚtvE} ?e"܅ TcQh\zVd=lH)jTGt))T >WxH8MAÏ*sVdf-ii^)ZLBvg}t+Ok#5T"t7 "?7vޥ`WKFĉ&~˯Q<C Ov8UJBb+?0rs:LR nS/[z7Et#t˟ j~ۍ5U +x($Ƒ2+KxϨ*9Z]KH""Nk1Swh'b݆M9%h1F>rQ*rlV!"*A [MV%G`A (۠64NӤx*mPN_dH`Xeh' 4P$ċY=]zx͝&䦅3LA\ ߥAd&=жԵ ~q~38e=-$K=$(IH]E܀]p+j [ #xunv[27;ü$-q)Hf%|-}]%k %}ຮ8QC}k6 FH%vIB"hk-x $HlIqm,2ͽലE._kJƑ.Ҫ qw@ϋh׾k̀gA9ԁS6=69&_s(W9;A=b;I8lq% H<R=+l"vX=J52fafsyvwA{nVL93qU&ƪCf'굜 *⪨Ң3fTUy0c𲾃HlPc[lU{Z~(e赭Z SO>mȑǮaq7 m;^I.sOoܜ:zI{FoT0kCbLSkg%8~ r> M_6RvdN~evqMcArr/H`b+5*F@H?/0tgM epnbDC+ee0c7JFW X/~2% hmgmM̓7fLsmv(, d4E5Ό(l&203ptFّTh}(npPP;#'(';&zjܬYW)E4ը]״jדv(@ @^`p 6@ wH(>fo% ֪djt~$c>)uR bC3T{Y#.)<]$"Wpnr0Z_~`lz],X Av<f}\pyJx!4L4Vp \{ MzOxM`@g\ 55OdǕD0izqDE14@c1t `l@ٽQ!ϕԻh4hպ~U6<; h Z0 dn}PP["*FhH}\1 :F8,^zLV3n ?\F:G5_L>JIoV^ǡ ViQfΜ كOզ UuLQBvx^H7ϣ'}h 6'SczY!5yY;2 HGz\f<2<=`dvsLEf5AOa ӎ!eHƖ'+6*NXS >dJOH]yZHU6ChԹNL41dV$LwԴo3HCEV<f/izzv?[[oWm rʼnW%v.׫ݘ}N[;+px2ԹXC5 b鰊/d | eDpCGeAH̄ƅ+F\$*I.:(1mىt#^PX sghc22'ڝc@H]XrmW۔$SDbwK%Y0<" 12f|WEFpB̗N`Sb͹GG fk4-$m B/X 2% ㍧2y߹) yR#9:zTnARQ HZ{FNDâ/ 6JL"Yrj[Z.-N(? 
6 hIDHtlPBP0fgXXj|Sxc֔mwm3Tp`^:5|@n_-8ѓ`\VyGU|Ub.G9{UKs>IÔ%TXH3R4,ET!$QxiI?.w&)YEI4oC\۳ox` s$ wׂnBiB+6Zִ[~i7LD9j$4d :.wh#ȋ-om6c}Vu*SL85'oZZ~H_^A;lw6vIH mތ>PidDl;.>4)ZH<Ա2?Jo|bҿ/WÛvyogkXA(XEƧ~\fVi-Id)v^yߩ>RQ3B%0yb }v N¥gV"%F{SM`VIMݕ07Śu͸fV\ [kEkG!vԞQ4o\F@$k Y:Eəo Yɇæ OՂјtYd jP䂧RaT'WFjkoz&`Q](j]BldkyټJK]nS/ujrh hzsϔzuƿl>&}J ;n7=ZjwMM`NrZ{i5k.Ȳwe޸Z; bY0< '$s\ilۛh'qoXկ !q0{=qx79EC<PYHEzO hRU@a1׌]$'@d겵dd<6J ~G /{6RP..J6nb# at|İ-DMX-=<2o^3'QhU?nXT0nG=ihWyz#wc,%aAM,Mߨ*z,bk5Jn<8 Yf&irR\ZIA%Cp^4y uYSY"Y;Ǯ=3ĜCO5vk6ABEͦh\ #'^Rl[޴H:ŒR1* -X邹Ӥ}d5/ TdӚٲV:x$*12h0 aܘӄʃ8 ;2Gs}֞Le fJ7x[1SZf5ly|;.4p3FaR^ȠwN[mϰg!7taC9hu@k{)ѕ3GV\8UX=9+NM݈歰X gcnWwPawz*4ow..-uZGpq/HO*C㡚8m>}=_wk 40Vd$巛_)Vp;x YCG 6h':ᅤ|L " 29ߏ4vN*?ni$fP1JӾChAu)[%ДZr:mWQ2;.)wQL;Cͤ((o~nw,u@s\8 Ax7,% ͘T;GJ!`m $6zM^76)yr1FF:3p2k?nJܛWgIF?FߑDD;sƑ;Ņvãs*t*𲢧2tԕ/'A}p-Ikik]LtA_-R(p9 yvdRE=٥Í6G L"dII!*<k2n_km85VlLLMrdkͿ xYr!x nEG Ëo.A_/bv@_6)Kc<%EQiu|ԃ`*,+:,5;$/ m_>Y (U8D鵞\ >Y:uQ7 J˺xyYKJ̗2zTCZnn*Kϭ)K.qRNҜK: F:rQ,4+PrlfB{'&W0:ppçʄ$LLB>7) QA;^WҶslmWwBF81 OvVcK][:iv>JͷuȫwʯUa;v9 q:o85k~SФye:&roHZ=ȅ>HYHcPy6(VUm6l)\s\h^ek%q.&:hSo: ե[/(u]雾gc3x57yI:.pUG<~Av{>1K۾_1M~פ@\jt)P` "\&6#ngI-8PSm/]!K"Y,q HN.A& $dTpu ҢX>7Yrv1."} }b5hG |ޛ.o Hn7v٣ nN'-M sƼȸ?.9j5Uϳ(P󥀭o8!WM#b Ώf_8CKOpkzeʵZQKl!d߬Ί2ZpvDs-MZ KtNĂ7`'6?WHJ1`_t 2Mp-ҕ@Ij^+ٹ{QFX]n6̂4_X!p%YehMP*ÿphjV{E.`4<3hx5mI:eLNE+` u @37{Via>K?s,A `]vuz{ Kuʏ$Z>nhnNpֻ+~[ 5^k8ɽqR;hSnj$#+yltU &fhM!i jlkvg {VXrf + ^dPG,T# 2o4L8a }#9cDvRYOǁu:Ƴe8\rן [UDSf2ic/ɐ_Yuh0p57Vy ׉b fYFoFk|dGm_f+Y6 t%460ho0 UcZf{ 3`-7#%14d 3 ;@Zlĝkɜ! ΢#O$Ѡ@$Q<{~E#ѠD!> r'l@Zv_Jlحl B (s#f?*m YJw-2D(>lU Kya6QLcL1w}ڝ^݊ g&;pyr;ɏ+1'r~ =ǘYRPJIg|̨h=DtI Sj,lH=s6ɡ`n兡=mZV" ^yUP{ /#J/ Xy'}^8YfOBM]V;4-Ť _?O;$0b#휿n) (V@;nJm8bF\̀Fi(`kīy:mv9IvHa-?j;DA2tyWc s3lǸDBXzO[ɎV /s}tb;<@9ШY!ꩈ́> :mt(y0-)w̗ #4wk~?d__r,?N0mW+ApkǐR_Ǵ+&Nn:"ew𳔮+\a^ oQ\-Msҟ>{`lY˩QQ,Ѥ1{Ds PUC#ayKger*l}A5p;=3a cn޴^>,W/ٳid@8eaRf60.^6=TĞ7!kăv&TCYuXG8zAa_yٹ|YΚ%Fy}k)]%zIdS ET{f- 4˟װX5[ pMAJXz[1Ćw3<["f`:bj=)41ވo!?VU@a.|QUD"ensw4b:xXشGY.@.z.c9ы|_}daSQ$`{Uc@ʣIw8SnF賥ӑhOoz%YtAstpO]ˡbD/ ɻ'fpez]5~n$d G%zLiIN0#Pnۥ7x&l" 7mrD#Vg%{%J@GCjPȁ_F-@"ɬhѰ+Ӏyc7KB-@ĸ "WEj Ra{*`[e}ӭT(@V(W0NS@* 7mOṪn]/w>{' Yq/٪?+ {3vϜn8_dؔEi@7Mmf ɞqQ"9w%vEt&Y]+`Bf!rwOCIA' \|kNb]ܩn s ݅F!nκ*qhMcΐ: 7$o{P$ԘXO'3To՝ಉiCtZء^bC¾a&{n"_HFN!j-yO#uDVw @JVӹ,2ܷ$/RFaBKפKST~(^1_n3m ^+چpp5V@vgMc:Fa"F.53V0Q:Xz pLqU8S_%{5}(6{Y~ U6BysB6:e"Jf0yd100Y "|%L[ uM7C^^:F#NKU1<#~{JKb1`O 2Fp1ޅiFqdH ~|{d4j7n1r49m1Lq1T|t,^,#_][&m``DVW=YLZ~ja HƚȬ4IL/[+ZUU^t%Ve!YLTY3e6"$}W4vSR\}vRJi4I6Zn",SaʤV&џ LI\yBJgt5>R+NG-f@SǮJ$ C&rLIAsT̎߸ YWKyߐl:QIL g*?Ƃ0uky( ^Lɸ7 S3Ggmo5! z fh igc]&geƦ{#VOqN;2qLxF1qhSūj k2]lk 2W ]I+هfe\Iia:E`  N0rGP ?d\3=עZg#5v6;:)g <Ѕ&Vv@!8ϖݮp[+G&wU*RJvU8. 
hS-+,yTeOJ4-O÷1NZtr.4# G@GB |YlĭeVQ!"cڟaͮM@.yP H}ZP51y Fr,LSx3TRp젅Dԑ)'{nj0FH:xq?EDY˔PJ7Fxbvh+C8(T_)oe.\m|&3M$l]/xt/)STX]X*|+_zS4Sd1&O].e*~cZ dz-S@ {a7tg J PlڀgYXf1m'/%!ɹB̝ВDf#Ė1l= 7m=r5NM"ҟO[2r#=grZB!B\8Z]hp C`c$a禺v7, 񸪀^:[ҘN[P̌a ii*G쯅H@Kۃe"fu$EߖFt3ʏ3AI~Bqٻjm7<]EYRJX Gy18/6,K(JcO?lFzSȐWr։gxℓ lZJ,ZR][~3nߥ%(߈-Y'8Yq6;/ki8%ߤx_n0ikvC٠RnHD0MblYY N0D(Ên7pVQ:iw=PXUHچJQ̺dN{h9wp)D*DG{dZve%c Pa@'ϴ@1xY+]XN<ʿONE*K)I!6yd_μQƀeObIb(wsǷIp\7YB:0Wڎs2rIsl)B_e3,p}VjVe`joكVx/pE󒲳:s4@r^MP4s.>.F-\on6B)#YBnf}jMɥĴW\Pjo:\: +4)aqڪMTh{SW~bR Y6bv4em}3Ti-D{ř WzDзPЍM!z foEk msI5T?:msH7,k7 &<-= w/tʧ׷q#pѿ+gp<<޻) dkIT R̰Y9yrۛ]ˈ[Ÿ.?^wɷ|XXSQ xH I"U1/΅VZ& ue<%1o;У35esOk瓢OM%ŏA^ ݩu ƴ3'y=\fi'쳼SE A @8  bZB094VG!߄g K7ř)R;eaT_fC~oܗ N6Lk_Cӥ؇n y$X@`+74==ePCq_Adtb^VZ;'v<>RL6yqUۺuƴc)y_XDð2zOne.RW"(XJMLS|F]*0@PyO?v`r'oCŌմ+d{}!vF5Y}HrC1LFe1 .W-iQH- $z+hR+$mС4I%|iP=JBuVt8U,Fc Ȗd^T|qpc"cdPQd%$bLLm󢎃 d8H+w\QZ:/L̥*%._0ŸF ԡ!u\`/?&SI:Sγ>y0 QG^"!39˻5j'Io²X$ hLaSWt~CAh򃈃7LkU& tNv,\jPBhϦQ:Gg|+bXz^;5[bj~erP%k-0=JDFIwz긡keԼ܁{XHa"qT_r-bZ 6GVP0 =F~> RpbLjz\楣,(')[yeS-U&up:_Tuͭ,sfR)xPV´Adfh)5I/|>:b*}hUesLNZsl ge Z<*_Oh+ ؟-cwٲ1#pIq$,^V8ȝ, ~&))B$PTD TzM g>n 6#E WèNw t+[2~ʦHC&Yζ17 O{,@Qֲ_;:sGuء6"aCj{#2 WYqr>Ug1'30t^Z}zWo޺P 6\XGdL bϋ(?̀p?Νs _rwvF<ziK|9APWY;M_b,fm21 =򻭭ؽ )4 ~U5vZ!Au7l7 #@fqL7ⴙ89.C[m;*{[-$Ll>In1~Äʹ`_x5& ^@ҞAc%7r},74L鬕 )$ݒYoYTnⱱhPY d3V+Kcg10t UIF*^ZT1gݿ .',d:' "EIy$|LhPND+ ~uQhi/ d'U#:]QL/(U*KqvSVneFC|)|ϓ?pEYh^<50#7s6ЕI[KJ]^:p/˔F  #Mz|.!NhZs<TĿe} ȧ֯wyA[ah xor a,'b t#ִ/<-笓23Cp@sM*q.Frkcn*pQǪHۨia'5m8&U :ʵQkMbv k}("lY),'*oEά4}^E3KA\h) ʜ+ l1Kna58$s|:#\Y "0jP'ЂYE ?nif+U+ 5AEIji%:q$pX/feH4 b7qy;I6;4^ZM=mw^ߧ NT4& =MK{4Fcl\Q6cdr6/[\1zG|Nl课j<;4OZVӧ^Z@J"9=ex97&1Ios VoG"dbZ?~JfFڽNB_L'C%,Y?+e QB(X@HT^|$AJ6 w 9RFA{%6]LFj#,rV@VWbIcOKPȊidw%5,bP &듵D ^`/[gA@Pvj`le#SVZ^a!5t^ QB^6lMl]UDa#SZRT5w|p`dFˋcJ"+{4)x+ޑPDZ#dCQaM*ZXȇM?*DnP~' |}qU8_PpBC)6 -*@:+B$8?PىK\U!-,X2Û:W s'ǽ*$oDq$_o0k ]:X́aPMHIm"p,wQqeYZ+G}E1QFlص|'@JwOhҦ#C.88VPCNf@#(^bsGI(ЖCb>T٘‰:VbtdǓ # Eg(<.(@Q)!m˵S/Jo^k(44蚳]՝:kbj`_H`Py(&z;G4ؕ05g%[(tpwΡNqf .0 Q(Y8#"Y LqJ~C[0.WADᓖIi1]B/ev8՗K-pm 웯sԵS'-sR& 4B9ܙƳ¯M:8 .yDkTI,p@~v~nu9*kF@k;ߡ[bV7}U+MVنR=A B'̸NDLӇ(w7[|6R@X3gziA'/-{_|Ww\~DrO,m;"\C ^>Ӥw{\> 0T+7h:C*`ʇw!SjF= h6 /6}۬X7.].AGJS+uQG32s=Y'hVQ^~$- Ӵ$K"T".øx@XD|6ซE_k{yXUTi Jv:r#@I ƔhGndu|\0Aecz&~Ktf&c$4)N\odsX;ҍrʹPAkX(۲>*/-њJ9#E!ۛ}{ R;twke²B%JK"#8&)B 3Hp$oT0bNJ Fʋf[g JoDXuX}zA f0sNj$p`0KGW0ig)XwDögQ 璮VbW!aL͖ˊjڹڠ=Ly@fI+ЍG*6ësch?eH9Ҁd9peXO's2%'$:5;u%[jL [Oi@"g א=~weGW B/@XK\r҆`x@+jo0šK 桟3j2% wܢXyиLX!!֑ʥ ,xSPPR9%MC;&.hVA7!Ar.GUd,cD(/Y.zF뼈_Wr?%*5vc@5Hp_Q2Y: 9J gqzW2tei]:uQbи"CW"R.#ql}Y0̪ za$1RH@L## k1!画~NjXv/F ,Pg4D֠ygsAC/kU|I:)V2Ŏ_m!0ñU}OQ>\O{87P `dl1QVg5կL4P2ſlCg X h"pP?+2 nͷ5U)bƘ3q۠.W& Bs!n5 *Gگ=ҥN֢+w"NW uǩqֆ.!Cf!uTN (ȿ/oO#H*?v{;+Ҭ([j|(ixM`_*K,b--r ˚bM%s5l$Y@W|R˷!Yz:xh)WmV |)ŋ;`*D+H  {R"u<)| 6}"f*x+.g@L?)~k9M+mK~njط=ZW L' #uWCޢ"ћI~p|SڭuO[NLxyT]3ȴ+ d2ư1Nݝ'I/!M/(",/Xc!cK.4l,/M2wrԙ}i2Cn\\c??{%j.QD]- y 3 L{ kyڽ^x IXjpԌNG5cX˫28d;e;!8ĐpĈu$JH_VYJ  ?P h m^DR-oeXa}tU)Z6+dz-Tn ^B^A଍0.v>X[ىA{ic]>ðS"FLGPߖ\m3-&:}!ʈ[' #(rZ^\Fom0^ ^'R8כpC^sKz_L`'~q9Pž 44Q 5 9"Mv /Ӊ~^ wư0-rv\s40OJ'ZDŽ7l%Ͷ#zVy8f_"6@@Cir!B `?N~sFx\KC#>[cogV8Jոn…<ĝdoT9ZiNk !rog,#Q [=9j4aͷ#O_v#񩧳<}s0lY5Y@؄]B.w7e[wz! hLw#j؇,K&ܻGN%a?^S괫%5 dPrQkzvi’?m 2hwQYtejT}@bdl0nXÁ_݂<NI_VTD}SΡ3 .&[CF۶V#ba:; g4'Z+nf TDNN*3!減w Qa f?ЌJQ? 
g%"+>ptCDi4 6LzoHEcSYD>U]@.>h+jKBn)ei}{GL-O 7JXnS$ {{<6 m Q"kC=5%hvj ~pqqZ3)Ta;1iM܉?7Owy3b~m![ן\'u'J\#yu6~5#6?](ZN0 K/j;ހ 'ޅ}JmR#ᆚo [}s{jmr C u(PQg3d*)7]NmDJ@.&A5T>Ī0gH#[Tûy\t'@U:tRSLwnK#j 08Rua!`*pdFbItȫrrY ̒uo0 \#^ L#-|m5kDIl` dԆNngGc St[/G2c¹pH9H@5nyH ocv6iΣ' ḛu PQg5fS11WЉP3$qdQ JIDIbűz\B2&&{$j'ڃbΟipBًρZfCʎJޚgD&{].LdkkThE6-<)4#3s>r*`[F4IhwYUJ hw}#F^~' &\tK畕+xHK'x,[1lN^>aS$@[m1 x)*3W&r9(o8W[B3'Gѥ7k@ڣi(?L2ۢzMatbO1%^Cv׭Ʒ9@,-[ 9ޮ 7E%Yct*L_W\c¯ i n͌KF^v;[ϜX''s~,|9tW ="W@<Kp>;fTKjb/YV 6@ s6,it#Ұ)Lkґ嵍'*y=,BZ/j+`8{ j6 `>Šg%@X{M@˓IJ#ALw2%¡A`E$S 5sB'=T:`Ọc[FPwf*OK7M2{ .gdm4B)ZoE=ΨՖa66Ae[ci]OᓠA*D^ꉮbG_EsU]9BG@2&0`ͻH*]EV^cFHH`/ÝX7 uQE0)"*RUܜ}27PēQEo6S<|+dfi]+,33!A̺M-+qqωZm"~>aS[eR^+z".2|/ClRLfQ*ns̿ݍy4 r@ƎW8t0&o7ˎ&&5TX;@-}lgT7.G[l+8`%9BEf" u-װ>S}hca ր؉Ղse 52o*omC7&M֫Ep/?<~c|TV%RӠ ZF{~?Uj8]]òg+*99<<: ͚o*+Q ԕb٢&;i 4HɡiNͤU ᛁ / P}Ŝ,5ϼK-wVtM+tRqzI"} 6 ؜tXl`>~\woĚ[aO3mI*-4%s§ߛ!ZAžo :rCN889GU׊cMTi uUБpPxyDu:>Nk7z! D&Pت58$|'*~ Ua{K? J1- > ܼĉ6cgY"bRX@=w^좲z#:]ۆgD8'F9aQXqCOO<>) N{Jtn}PU\7[ޏ2]fK-V;Y}H}Is@'qaS> Rmٹ*jvŒ֜h2'2aA`&tĉrKPSiԴ@9rpwDm!„{g2Aݘx&3B,ޅtG9VߓUיZWJ~Ma#/^p*@g%O_zo6Yg^O*$Y_kڻ=}~q֗lnjWB j ɐLa)˅Dim>^_FOYf.8T' %w lӇF8Bs~G:P4 Qߗ=,b/&Snݓ]hͭ[K#뜌&0;Au ʺADha9,9JA?h|fRcc_[R3 ڞ—mwԥNJxVY =|. 7h66 -09eG{83ޑh׿$BkG7XP7 HJa̝13 HGgJabPrl+ ǘVyV̶f.vf49*\ HIa Cue}&u6_v? 02 y )_ 3O͹E:\ EJA|T nkޮ =q8Uݺ&X.pAX2E<p cNONqbA`=FYB+NaVp^rWF +:I3~ LPкHyMsO o O^i`94 Cxl+K+U y ڵJS= JFiKaadBvuA9r kd*P17E’٣X7A J"z9YBQ͟ڇ1m׹ xk鐵jϐ :5{%&0r d?(X60Nџ#=r˓e;%]l9cFɠo89 LDNH^#j9Q ^KV5Kw85B&*vJn &{L"oQ SmQڭc#W4yr eҪۙͱbU( 3=wTC $_bP֢Ƴ]RuCzIQ *wP@P5tpX$Z`~}Iеl9ZoIYp<+;!Qp .Zձ| >{RݔDoRv;qUN8iU-jrlUYwjÖ|ۭ SYRAPaۙ,:e`nb݆HEWM#3 `uq]v@H(0=Ky%dDe_2TeUB' Hf 'tJoVyJdd|A c0e(RC! D12XЫg-,u6ZDv`X%ğpl ()ZuU`.|x74$`z~ Vce4F>Ktcd.jxt qI%8i휶'h֖h,gS7C&}3)b(>fG8a-Q9koӊiT-@WN 4u-E,.#) |2b(>KtYd-xͩf'etdwUSAva+x٪S-H|4e1l"yK/}@Gg$vKk6~e&:ͮƫ ~B-R ą*em)bn#e_%~@'gMiܽ} yOoHW;6i:Q)?/zY=&q1ARI(n(f#h9lL%VrO@EGCu ?*NWaL>7o8Peҍ0 YfsswoE7&(OO:֒TܞʛC3Ҫ\9gedgpMH$VmQw%q.d\dJ$ 6!}e<!>|sRҮB.WzMg"&mJTb~ /\%W\QHH68 xf6 $fIn'37#n)t. S sI54MmdMH[n',o̥S1R?YthV.PE醐ggxea)֓u_5LaT,gJӟN+6 מ@]mnbM8u=4zQ ́aT vUVd"X.Ư3.b՜qgsg1c`.S fcߥx͍%h)[I:VәT&;hP ?|ӟB) 01F6UWa_}QeLXHك`ѨΆc ήЃshq; 4tpa?N?pIUn첕bkn1EO:S4Lp :ߚ{,>)$TW(5RC6ziҺ2mof wN(Yn̳O5A6,7NfhݵfhEfVS ]KkN{hP Ҍo~Pץ .KQM&ì*Nmĕa>Dp}D T3jVQ&LQ4en%Aob|Ʒzžݍ?ɚ8_ڻh8]&dwҏ2pjR}kx%9O2Wy/B{`֫|<>) ;(/Cze*Y5+( BdS~_[,6)Z{ gqAt4ۊfA񯍑`v{W1,+2 -bŎ% R=宄$H-vԌjAO}D ]+*J)=&q@=Hjl!Ǜ.!)oTqKן=ve~.Ch '#x ۑZHU0(.m)٥ŭe>X _INʜuwB{\f%.j|j2B .j"(9pL1,sQ_7 >JܵB{1PxΉy T_' f`?nÍko>_^"A`ǰ *Qr_x.ϣɻLĪ2YAoI>dBJT:[( Z=ࡎ>]\:g8E2^mg=*XRbJϿC@OK xcĨAbkiՇ]恳M,*u A.+X K`LZB\YPdYlFyƿG9/e8Y)4F ADρ660| bORkz w?&f CxY#<,(*=B~'xu]!O< oVuLQ|k0բ_ E.Amۑr6UJ2gxXCރW TU1jéELHոl5/%~(R}X1:bw}爕_x+=U$g[YV^Ľ ?퀅\qJP+xi',2 8rl&uJt)5c&wu̇V]g?^H0/&(HsOezg_АuxC#1PU,7XR] "_ǻUuy"7J:Pq]c&D#.۴ڏ!mqxF"|Y\qRu@]θa Y:"B˞xT;90힅2C:V9iHzTs"%^ [e Ussl}>(Ckm5Ѹefi% E.%"(aS{vca3zZՒfD{-p.FġTa[aQQzC#"f 5`/GaH3G^vGn˗{B!%VL,k=?H>hdGAUwFe^qi7Mz= vGB$hM(d~=5qܻCv|K$uśxT;wz %A6j?)Lt?0|P# sG^U$3`]dNrV~yjm_Jg|`YčU>G"nUTLj;{7Ҽ_ r=I!4*F)&(@w|Jc&;?kPv7H\D>rS :_H] R3oJ.+? YL isʨb`ɆR h7y#j:Tp wt-)E`۳$+Hw Uޚm;4'2D Hfbe\׆^9dM \ ^yce,VbAr.yVl@YTQAԬkÌҚ^ a[ciYJ!SQC ܨXr}P}vce2/N1x'y|oO}n´iH+ ܈ABmr.ьk|yY!2WC#f$y]gr>vQX܋GoOPHq7"Dj& KKKB0O#,00h@ fZt=܉Qr,^S,S>^>> [ȎFi/ PjB# &gwWf.UR$t͍$ֱq׷ݞ͂Rfn#v|F;&dhUɄ~[x"Hf B lTyĀ5i.e+s0y?uh^xU%樺}ֶ\ $c>GBl—(BIlauxϥ藫/BHRv~\kJLdVeɿ zu4Z*DMqtLc7[P8VИ?)ηͺ^0B~CW^3fCq]u"#ղ X#E|h~ "l;%8"#1[mjy(e[T+:UYGH%LpQ 6lC *2S*n)D4JyS3xN|8KWBa7"DKa0'm= Eb◳_]- u 6eUmmuOSafè~;DZ)U]4c(EތC"y`/y}I-7XRM?/ہ `,\ި靜dM9*ˇh (d-??ңgdsqOɓOmO5Al.зSE! 
.g# / A1 d#JL j..#]~3`YˤoĹeZxN\bE e],q!.37?;|(+ o{\Op5K`PyzhVH,DreC: x50Y*X2AT}Ps r@x"9zoR5Y'T_`ބX`鬼/Ŋx ̪~|Vc;[QZz:=eSz7eT"}At9XiulFS܆ +y$g3 ˅z9  ๷4X/^|0, bGC Tz=O>jXCq5]J)Dx%Ia5 =l|kbeHlC nBbuy=h+vPKDQ`ٙb) `_"p[C\`6)̬vn^|'\*;Esjqt:Q1L []hK MOAJc>]V3%>4mSg~ietÕ.yKB]|{ƒD8A:r6}}7)l BǪIy!VgX4^IH Ӫ H!;Oc~zgaWRhBX;Α"5 N%EMl$'_^@='կ (RaOQjM 8YE#NhmJ8` pF-}9"%E&5f¼bOxlIJ+E6vgK+~:nX6\Wk:wh>pr]`s6R2dp؋C0!;J"_7$I{IY*uI{њ5-"kM]7h\&ew*Δ&[%]-ZUҦ.? < sUj^q`I(1XvcSmQKN VI:w.|Gavq/@gޗLyUqIU#9Y"Rn!MEf[PK^֏-} -?l,:S&aEr}KZZ 6-eaЌW{/kk\# fdDO4_W˹ˎtB'ATn-ŒBj#>o)uxZ&kb;ў%Fo)ش?e{%}0 CEB XHHPK۵zVyU+N.y67 KRx^xg 8Iǿ ^^Ñ:hb{hDCA)"7O|>\xc4&Wx@"FȺnYןY(7D)[ ۻu2ó2FN<_ۮ WX'vFu$ž\'&(/ e I-oĶJ5OIjQ^p .Kqq:gJ$(cY,ǖ=:1VJő${ lbR9DK&$ay镀$tA&lVf͖lPSOG^p-N& A?OgEĐ$;H=p{η;_ y\ԑU{r \ɃwmC/}Mʣ!`KK2Zp0M]lB!`^lt_a1h Ft)6 5H&s1dÈZ.F#wHch#M1u-d9-;G8tTjHS:}Ĕ+זtio-n9S)h?k3KY $j#r*9^݊Ҡ.69/&ʘBu!y4eVv]\~-9IM6'S4X4xH6 }YXn5 "FcCYTZ29jjsR :]0g3c%oH%9XӠz|ҪWw~b(PSV\4F^=XwuR/-Pl;%< {^`ʝָPomdȽbuGytvayElRS^MP}Go('c1PoXQG1`unwcfZϛEB-Bmkͅ"?VjU̔a.]ă7ܯux Fp-hM&BX|*Df^:RsS 2;_ Y\\1'Up'dz}):g+'DЈ{ ⱙsdwYGpnZAy"/j,D*P3LZU֟@,#GN޻|a^Ya3%_/ҎE<=`1&Fs:YYAUĦ2ڮHt `p }ɓRB]" IH}<9i>o$wMGL_3|,$ y˻&_] ZEVf8')7d~JJyu$ 7=}d*+[^2o)Ȗ4$W!]1la<~ 4z?b?}[;sIi.^$dGf;ѿZʐ{W3%,p+ a$^?tL-=u"TQ$Iȍ}[J!_t;J> MV4;Ѓt2s؋eFJ݆{;m1Lx] urvQ#Ydİ$咝юarrvgΣtQRtN Fj!}3ؿ}ו08mfS|Q xFج1,`k\G@{3IXՍ'~Zu"o@s%?Ɯ^m"w+;^Ux`*_KT) hfcg8wK{ w:=VȟQGl´=T{ZvƵߜprvĒ{ y<\û7))&Hń[Yͱ!n@ T~V9"n?M1),@0lnf0aS*V®WHs*bDZffw ]ς`z0ylUk;f=`e0$<X9buiΤ_e{r *BVPTvobJGPy4 eq,ęl&:Ց֜ Q1>Gо-7, xһ|M H凕 eQN-PDPբ' *=Y31ָֽ֯CzAr)Jhkϭ;-!ku1?7Doa$GcgJwBIP퐅F}ۯC;F4()OUuƔDZ 6. ]8!B;5b/KTU?t\ ofM~c2oF}WQ&wiYƿRBv, mVH^ÞC %O{띝Xˈ&!W𶥡`+UEe Q/X^w18Щ WĒ҈*0OG\.<Ψ7a~OrcDF Nb~,Ѓ" T_~.f ז2c_9k6{"8Çmxn e0CWW d8ɸu%tx=i}47Sp 8@vkܞG+a_1W:~~0Rݨe{02p<Ӓ Eo6W–)yqStUn}ICϋ b0IF []GY2wUQ9 M|ʫ0L8d@Ƌ(W0Hc'-o\&*{*ŅR5z Nd7쌇6_7>w-:_2nw=Jۓ\vge=s)U5qM!>^H}x&8~5 O SZC@xS7~A\f58ɂc>^'rH~*,jH鄒:pmWǐF ,&+Ĕ8~*lL@q]լ$cI}hZ8 ͕tQ8Sx?$;$zL5c9jݗxa3n5au%%{HlDWZ!la4퓮B\O LefP恋6 %D[WJVAb Iٗ./a7wB t[ZsZَ8VTl߶߁"WNC(#XZwS Pj/hrJ$5N&kφ'Mavo,,wjU7A^k{БE;īYߢ0.x`9dPb҅!pzPǫkSW<`ɤ+ܦYN IQ(ހu xuHY:,~.vt HqJu]E_EF-v{h!ŋzI9lw3$kұ'jծ,T04@*ى %4$(]`7) lл/ZZb&*SH1K˧'U#RaWx&0w!,vt%F ]*ҜMQouPR*f @PTr@<j:%0V.WxL ȊuBmS&#_l?{* 2.!WD_i8iHɦCթN<5B Igʽf 8ʔA6R\&9zF3fߵ}F)y:^Vq ޙN!|pgQGnj/ !KH, ׁ>*{$ݬ9>8Ve7lP0e:gjz. %~EK:mNMikVPCXlN"8Y 3YPlIX)Cn0Q&(~ j%H{rP(4ݢ #>Èw^s.gP7tlv"{v]Ӄc;sk*Μ5sԥ0J0Qӧve$nx#b3r<_)h|]gg'׵ڌTsӋ-Myqsq mmS~s%IR7+,GVTQxIbU>RC4K9o)y5 d0FݶEj5jUtVbK*&h(EU <·`R߁k2Sϭ zz։YjUХ%6!*]4 @(勞u^)B6 9Gw; 񓥈nJx<1~7(4#̀jInwAGi2̛+itDAK Ә z &6i|RF0y G='UfnwtK C<gLt(dC{;w*\p&^L|ac| C"84vv^J##cBT!mW ,ju*3+m{Z ަj"% C ;0TQb.'LrA8J%`ẓjA=?]N>zJF=0=a"ڝֵƕtvJ-/ORBH1狼ń>L-=B!0o@|v3?G{^E|bi6DeHCӉ[]$rbn rB$2;Ie#_1iR,dD:Bβs q8UeBS_w6.iU ՖMhvŧ~yQ9^Y}Fo;ȖdH}J8lA>UG ?\9Q~'nKzEAp>a>6!ͳٲfi]+]0&}Ef0%U;v:bQVi̤L=6󠲙U9x+ɂTH .% IϩQ[x':@SҞƳ_`jdUݻQJz V`P9XDH'ۭLOs g콽f!_y\R`.XWd1 fru|Nldd G=*J}f&ؙ֢?8 ~G>)+9~}dz2W|dceI[P]276$[+ M7Bk"W}85x1Qw+.q{MwoȐ +a1\AlM uglw[ Ƚ>ҡÛ# 'P Y}2Av5Jn升^x{ %Ytov.!?/MČ^&L(6۩q2grp]Y.|KN}M5T|&PLb"QB|LpʆVdص`P W_ Oka' 3aWlM2o-V:خr8RӝYQ4nY'f ]ϊwcK$T^hPQwƤ-gB”4?b*RŠ77vpݴ=nHۂLBaL,} "?5r{U e~B_;oVi䀄 -#ʒ8u%|faq h>ɓ노2&.+^[iN`Ncc4{?hJN)T}5S9Ժ4oCѫat!֦._'d^,0W:H^MPe<>d! $e'}8s&2BR]~iEIיL:MdП0%9`xK1a6BLRD]q)# aڴ3;z\[fI8"CoQ,T2&*XnԉrRH8/ALA|eY B>B|RӦ&e( 5U4|xńIby4:62"q`snpw\1#dO2?+\dǡVAnN ꅿ98d3ѡ2p>G5T$*)) ܜ3d5op3ڔ??>$߭$I욬g~X"TANN;Ȕ#,c@kJ2iWXDpwILj1j@qv``e ?M }$~h&>nJ'QijU*CY-aO\4]W@XE>ft ? 
<9vuB$6ֿF,mjor4\38d+"§r0e#yfVkʤ_[dj0;_(0G,1i.p?ļTr< +(hibqhuY8m,\<<[99`V`Gâ1 6b0lE`X%6@AZYؠbc"T1D`ҋVf F`Fb.j<ﻝtCrxj+T8|(TTwx^ke*i@#I)Wam:y5G5Ƃ,|￵EXah3R 'S,2 I)oVN(t=`4*ZoN*m/=m>F1fߌ*lS8'a)ո kA!ă.uȞzQ=(8yCǩݺc\ѡ56GL'qQxgxD,',[eȥYY"817Ó\l9ϻaY#GKM%(wO}k{S\mFAǨ6g ?gQ\Tޥda=L>cs܃A87_%&GQ:aԅ֣ɘDiAOc+ Š 4Tzzq=)]`f$C;Ozy]*7}֤FaYpW 5u hFE;;gw7jX9ٛ6*` cc\6Q S(:  F53rzy2`c9b||f)1:&~4ֈ5-+Tn|o[?ڰJϛ)0uSe͍ŲzThi=)[s<;Zb ۍQ`5xK+b Tȓ'=YM-ln@D9up0{.Slz8ҺX"{3k\Sw{>nC09mh΍<vz PYպ n g 1x3b27wOroSJp{E)".ƥ#5I1iGvYbuS/Ɩ&nhd' "!r,YY[ rFhw&Up\[)єk%(ĥ>jm,o2泏2n,@kQL{&)I *\m75E5Z~KtwVkXvZT1DQR(tl9B."/3xD :wHO`DC.p,h&t>aW,;zd̺]Թߢ# octՍ2Iv$n{z{{Aj%L4ܦ+ާ:~YuQs2f$X glXR?ͮ9Re~֨qbŸoivd+ܧRsHs<zC5uV,$6ϓoi5@@qZ/$Tiv$eq LR^=Z*SFL֤wۘ5U3Q+"w̟Ma\n2@^6krmLsz/i5!vʐ3Ҟs- 룴T{^RDH8IJ6Ϳ>NzKO7@ b'zO01[U4T*5o 9=嚁V+%ݸ5/D?22鷫ɷ6&4vR^C!4uAo2Wgq&bNks:]bd赓f}$;*= jԟY) ae Aگ=!X@u9H*g4LN40`! vE ϔ4 Ϊ?70meuHTVa/U%ƞ%"Ʀ͡'q?&sYDѡ^O0(y0uFja@clXF@#*b"U)G4ŵ0B%nt!p iK<̩GU*W,'fTus5ZdJY u*NW,Gѯy^/`wi fj78f<Ʌs?fJEx7Ybyrf2Cٽ<[u2g xC b%6i=qP4 ` #G|6q}ybX1+=o |W tǮJ|v"P3(Gq~a|:-oAxh/$nĨ4,+8,;rbqLXB[2q Q=8 xtu9")N݊%u'0_WybghEFb4LMrWAsfyDGϏC(#"X>"3]4vapzB:\v2^_C$;s`TNolV{ N.э'Pm@pQS^NO>+ *`˛O?Av9<N-s5+kHIDww.B֘W5U)mExpΩMD ȬU Qϟ+ZU gwxwh~^ ?u?E&Do`eatZ;O lztw(01n5X9ev̂"Z]Ue.&鼼Ubp})E񄻎&&oWo.?4}jT&BbN~eXH*{ZĜ4@ŁRv0k⽈5a+hz$wʳuV>o`ߣ!K@e|DJ֦v^Aa&rPZ 7?E.p1~rV-%f6S@Zп>6^KxuhXcF 3h<phaKk|7oT-H&)_VQ7`;m8m*2@\+BVTHe۫6fC?I;OцerF1JRV0<~g "q_!Stf'FXxqo/#T5$޸,>6$S%FlHrU0J:2 +.F5QߥkuxBBkK~]PiV6c̜$%8h>'|ZcO."i6QvԏXX,epxc7x”(r6S7Xg:[PC 1=xH@}Qo7cOrcп"fN嘐/iDvPi."KRZ-Fn.; '2ED]UB7ƲG+I8/" j[Qe&~'px:i"*ԓl浮p ף1 4OP@_Kr]*nd=O.!O܁GGNeI%W!x]3W#Lc*`Ӹ `bkܿ Z[5tkeψJ^3QZMY^woq :條 wm%H" IB4´1*hʕ+nH_ x{o_ ;`+JHŘV_,Y'zFe׋&7ԥA2b1`.w9 v 0PDMVnOn}JHu330M ?/ow%3\=m4[*,apaґq게Y39U)ϻ=U,!0wB{?NAqtcˇ"/d*ث,{(~jDH=5rvS[ G,4uc?ßɌ,]P%.k>{BcVTy.Ǜ?iN%};r<ӧ *z[-xܼǥz-4 xZ+/Wqe=8 z%i3#IR~·Ӊ4=m1 EKu\P̧`f;GQ_U |^4=5P2v^~ܧǚFJy7.hQǘ 1I ·{ݙ(emR̠ڇX3ٙh߁^wMfqY&kzFjo)P㸰%ovSQRk>ۣ[H"ylD̓zt?ӱɃ\J0[讅 s#YE-Uj|Wėu:KZb~/BҌ`xuEz}7}F`GھKA:B;1Cn}\@,fHkJ?y;K \Y׫jF?RuT:ʬoCE>EA~̃ؖh(CI4S dFH/ @$DMzZc#( '-n HƤmm2̄q>_`lM-Vf;F|ڮzVi ׊]pߖH Mύ0|l"V7Y= ;IAV M'');;M+.F&<^Bz5P0W  jOX3qȅav܈+A ʟ`[.cbN%oДu\ȟA0z0-sfmEgt z"3V8ͯ{# 4cZ2۵`;}_;IG!IPn #quھ`%-U%`dN0ള,aJl|N =sqSZK[LX jfdQrH<,r&1? 
V9/I1wb6z.CIԖҐd )T̅rUSܚ?GӲLB 5LU9 TRwVY3i!)2Ѥ=y Z)z^'e31|R f+Lҁo~64`S4qAcZ?Z>=c vy=-Q03W;JPThk=?D$"ⶽwO3~E+؉4z9}6Rhi28W+qGʤӻ)J`>\l|Ɇuh1p}Hg,RV:j;ryet=(X v3mM 0RѷE,܆-z񞕩|6pf+QI ^_˚!׆8b^o "ఐ?|XOK ȃL^;O49ˇmvݨڑ/ZZt-!FduFq%|0z墫YL)MAL$ۈ~FCW.tB2 )p>HL[W8oV#a.a -9Ef0`A]8G.V5 O.~l:)%y)Q[[5{u\= ,U@^D"vvX 㩰٠H?l~}3C L{ʹY;!Zpf;PݱzȮiIԎv|y'U V.-b5DYtHnNg5Bzxs¦#!(фz\\PB)3;1xXNyC ߥETV>>V$z _uNơ7s rԆ:2^xt Xg'PrYzd'%~w$[6r9s`m Ymo6$^9[labJ(iZIXm!( :N{90;>0J0|7G_h;+L-&cN0{EwmfR6_N@2Fd 3^X=P9 :*=DyIm ubM};7G;^'(ıKb|cD`xX/@Yպu6} ~,@{,6L;u< OL/j,nc;!=u*S v׫ šت9d3f0L_]s~{c Dr7M0m+|d3__ݾ'o|)x͹iG&ӭz `%O&:{m3C`_ݭg_[n : ?;Ñ־% -.x\H'2TWP!3mѾ2AH|QEIF bf|=v҇wX#ԋ~@8;K^bZT,b8r*P; %i 7tŞvI[׵Tk%t>!8 =I`pR|DjF :Sݐ6m4qD &K@,Akϖ37d|a.-X#?s[1zn!է*(>Hۿ:.ZArHTBBܸdm >0 K*~X X1@_8n R.Z :vZNg,1 p@gC{ ܥ.}0iX.< g@9åJzWi _FJ,nVJng{] vrv>[^w2Z6QH&S8S+=7pIaQb0%Ho@ N!e35[XPVqPLGpa2-X{ׇzWXg\ sE: };%7^Z)mr;oT_$WR&?<"`^ĥ-rHfFKjW'{A>6NyH3(^%ekL Yҳ_g d5MDzuTTsi+KKKl]cdpHjYۉ5A?`zoC |+/fgH *_zӬ,U%v`1rUx#l634՝wȞr[ a~O@c'f%]zUVd>ٔ"<6{OGca@bpyƴ7E: q)3 H'o V.@%_bm&` #8QO {]̭2" 0 H-Ր;scS ˜rB[c/)9 JD"\/Kj}-hJʺ2LT-(FT'md6a9[!m H_ЅrWM9 Xi\vFcc3"H ̥)҆FG9ۥ̠L 388̙_f BE8C i{+PE,q"$H6oCD{ ?!P=ڭ *ƔyS_B7,vRKG+ܤW~"r6'<_aPZeFy=UckxxRN5T16nw{IM-b,jQHa(S%\Ze;Va->:qK:W3\Ϗ`ՐR,H͔!QFy^l3՘%9#93dv&t0rHdoX"Fʠ>4h9{/Qe"y/~>"ŮIs@02@Da_;f^~$HLKO6b{s]N%v1i3_x&-?-Bz]pۡe_-!1c*ʱcKxFۛzx%1HYC '4]xJү>g` 5 }WZb56S2<$E(X*]z;QGfʿI3|b%,CNjO`]>!N*Ek!f xY<,ӎT@1.~v5 <0A|"s#d&dy\1D⇽˭W^# }Ŗsl͙T!DT ?"[iv轇. >rM4KbFg[hDۘYej 眑Zݟ]KMNaɽAyrJKvϥFݿ%"-B*'3b69.DAшh|h)da38[CӋ]r(^^H 'أRd[7h9r/A!6LT莗EDϨ=9ņ#@8- IQ?8 3M ~1ݾ LN@ۍ˛V<t`?s)pƴ H %Q\͕:9߈%5,Q뎜YNqgiLjX@KC_ȗB5؂3O%;-\ p2_Ě9K*Rjh)ezYE@ۑGXf}Q 0a )K.(uɕ}g!s^s]YFz/tX}2,{iʙ$Ƞ]}PjTFVXE1mnw"l TeL*\t^FV6ù4.ɃQ0<A]BDF /lE/Ôo[# 6: F)(p3+z P"6`ro>Ek6(`]_퀯*/& gxWd{ ^ W GoLo E__'{ ЛWIK:duD&~'4\zp5KJFHMQQ9>  Zɇw/$W78_ I{Vp*E`Toa;9OKpCJÌr2?9zVM%ȪEHooMވH*ѨxȺK V@iϳ?\%zlaOjB:#A@ ]9*:<&GX] mxEzOg 6?,jnQO=>?#CL+zKRItm@ [p V}B \ @eru/㲅`Im'AiN b (#f0܊b 1fK |xGNQ_,7a{Dl.~Xf|gg҇8P U?^#Ayݛ$}ѱd ]]W_ꌘV3)uF%Jzk9SDhX@dEŔrR06I=WH3IUM1ϩiRv`v:Ns7*۶_:c *f-{|Y ̵U S_+N=_۸} ל$΋9hR< ݍo,2m y.2ѿg4L 2BY}526ddGِ|jsUNF^p1e`yŜ8e`Us~˔on]~|v:fN^ N!}LrJy\mؒD%3gaVT <'g> gmh=xy}֜bT"DI]w.&keÄ]r rAm=Wc-KK[PqJ4ʾ!RN$}Z4DB!e4(9oҌR dž@ܫMOH, =xd |Y+ XO7G. ya6tCTމ٨Ӡ^z^}p/*}b%:u?@t܈:}GNx~'4}jͫ6`'Z8I_&_ o酓eI7AW g_iD0ȮpuxXKaҩ]ps74 G'"o[[9e}Sxm\Jen!-L#xY@,!G*0)r|W/p T֧\2(Ȁʖ;L^O%a%>;VP/,Jb$H4Y:xLgSD*(1}gzMM3 * aAMDgހw]`4^ud)s- s7Vհ pd~aj"£)UHtYukUFkE1m˭ :{010 <'06@BV"gh-ð Vl d) wn9i}v ؆YM> ȋ<2f';څMPFY:sW>(Hh( Mbr3SќY bQwnynR;uN <g-Oݐ_1ِǍ F-0[7!%@/DȵpEaN ޭnnB+[n|Iem\w}3j¿lsz T׃K}D/X!:& Rz8l`lLaJQ̀Go.KipSQoy!=25ԛrq^zq?FV3/ m aqղ/K쟍jAvOlo^Zm^$f4k~ ?%Eː_ 3t>)F ^Q1 LH*pyqt& f޼ais麗ItlZҷ֡"3g3'u˟e-SI!/;K ;Y-wy"?"˶1,JǤ%(tqOw:p] RPG?D=#kB~XBB(!kDGEjLW"2T* R{!J-13 >F}=ij:}'"5 `  ASP?/^A"݄w7j $^#TV-mIҴI3BJz^_ٛ/ܶ;oX~XmV842EdξVvN GC]Q…\j8A+7= ?\MT?ڇT-M|&6DsA~Э;3&F5"_A!L,X=TS`?0PIq^9esz=]7k]9,P@]z^V,W_+70/w1i; vC-|6V49oY IprqCق[e7Y'PdpL{;zn:iSmS>;[H @ [d1SveL(rF5G`u}2nkе#K«muh@pN0Glfas8J FNHUb6"tKUgUd7DMFټE^( Čw-k+<n*E^n(2,-Glk{Ai;'7,mHJ +󁲿 >xHy3 S~G&-vBQ):!QSgY8?YJf:p]GY3DD~QV;#JåQ/9d A<9|o;h穴AE:F<@J6T 8Rc "7ߓ0G"ήG$;Ev!E#+yb59$vy;ڣ'~}a^Z-WoXw&l>G]|I#ʈ^wGG$wu>Z"&V) [/Q1З ߗ.x-vBRC(4"Ü+xM0e? I"q80JĈOM#CHm ~rU$?U62wRvg ? p>aB$= FP F$ Koa7y .Od<?%6jk>oqv5H_@ ws%Rg 3|ܤRܪoG><18%5qDET@|:7*v.+H }ݵ4'G>IU{sf]B;wv׻eG#gyWbל8X3w9 ?ֈ6v-]7!^?w3/Lr .Un&}^|c:, k0pQU~J4B*cGaIr SA-{ hdkכoOYLlOh|7΅ѭ YӻNV8i(!;1jd/40Kj<Ӣic*r+iuM澍riЫPţn=Rv!ԬM+X B'[r5ݳJcJiq(V*#чQaz? 
C=a _:aT^Ij*֠x\Y02Eo X6:㻱DVhiOяWql%^{R,s8'V+V7 ^Q(gK;=:cj[^odJo:0͊Q% 0}4RQJPER=.t–q5A &PVsKb T0S?+#nlRG]FC'nzܛ6\J4ˀ4a4ӞwIWYϸ1']k(JAdEG 7>#yCaLLH?T{~'#ȏu7"iF')ydz]F =!q)}\Ks,%evțs^?J@׎S8\؀=(c[_{rmQpP ~لYGtٸiܸ Cf">+XG4m {Fz oo<⾖>.&z`7ZK[PV_ol(:R䨯˟ aep>*,NT9Ŭ(3>;񻋉]5@L}C aM5)L_γ]/y `qu9o_I;U g ,u^Qϊ$pIQ L!D͹B9(h"#!Yp'w 0DKBɚ7 l6a@rwg(-*N_%; FR 4_-hZ)v ۄSjcd+pVy!=iQj]g=`8˛#`*%i^÷8Ʃ<ޮi7G#(PNiOϠiE>0-1 :X5nMQu.B fk'h́ܵ%_&+GR|jXFSg|Wg]}M0T$29Q7Ḧ@y`s̺28Ν#/ "kaCx={#^# o8#!m~l|=(q)(HdF[ʸd'FpZ}gk^g],K±wnEw\wy ~/`6T 2µ`ƦA|uMNb*kҚqe*Z}^(RGjMsBp亹[ߠ+~p[{R-Txf›ώLG!eOݝ #R/կ6w,U$1ÿt&>:6-[𜗒!dd ׈ʊa fV׽`טtrs)7P6(!ob2̔%qfT̝@nƞ\$+QV&{t@ ?Jz)̞g8K 1[ZXA@ œ6?8CuLHi ~gŝs.B_tBE_;l &p GyU<8D͙@*W wP$B-8Q CG[. neHrhM~ȓ_!aEm|v $1\Vj%pE \ %/OE_c5YA.- Q xp/h|I/MC>?HE`mT.^'u&*A Q⑰G;$U?^IC=e+dM M]0􆿦Ub)>[* `a\mf;aEX["aana Ș:1sIa2o#\o$./b 1˰x< R9Fq5tΨnq+-UL9 %AuC6V̠U(t-L -{oƷ5H[!gO>md+O h2u* ^sr+*8;0D׈i3-~=|R }| ӷ`>ǟ(e]zA{0͂9J"D7',J9tͲ")Q&0[epn'9dVY!Sa'ePzT kv!0yus [Y@,cj5}+`-*hЬJ;wwu6 Ń-sha}`F$ V0GmHGgQ?)4r#Zk I-jd'ʉOzV3jvF xRtϟ"Ȍa!=@ϑ4 å:Qi 7 4/U؄R* &'JՊ%?P0s=@@Ct nuio+dZ[FI,p@ dWuk8 F^%|4&Q4: {e5@(IٛpDcRRP{iV@^GrU4p͕7I@kNg6֩F[ޖ!{NJ'_;/2cH)עClVh2 HC*)9>R8t:<&;zu &o1P8ޭ(ƒuT},{_ mG,b#Xw(\c D~b@AŽm }ED=[;r nTĠ)j$S*]0gնaKCSFnΉ%di(H :\oιS` ,IyCGxNHVtֽ+|p'T4DlK2hW0( 'ޓ·@c״tKF*2&G_Sn89qO$I:fyiJaxGlq̏S2b m D7-%0uv +`rpdާ- M-V2"TO&2u킵q`BZꕢuW {xXHا_֣?#ȝs JcaJյ#z1]e!=鹘t5kq?8jhq35F{= Z;7i@'R/Vw̅X'Br$ΒPԲ2'FXR<>-~ZIJ.t 4`1hIҙX@_ [wrRV@ hr~4ߦ[iʼnP晋ft:oPz.!EIn ʑ6}nk":^OTW/Z_h|]h2\\lݽ(tx6!T(@==PKtER@L>`i\h5Y3bz3 Z:z"6~矷d楼EڡfeCݓF ҡgUrĻ0h85i ]FVG`;q55/iaq'}ġ-oJD#s_2cijun&p, _IfѰ6;0}//%y/O_O$!7d6dPd+4}[K/Xے+k磇az)n}@P_SQ~ǒrkLѷdд$xoo%is{z-*QG PQ^MsJn7*8]qm?8SUIztx;[JPD`YT갅k凂VD :^@M+h0AyV&0>Fi PLg^U `TvXXNTyh,=0T1hFԉuفt Au`*)A9qie(豵FEO!JpϡP@k1o-wm=Q(:r0 %߁05}RA "Kgkv'暄:cu2Wd%,"̩V{iE @.]A ^؅ "'M[J+ZHM^40oަ ?I}nR+B .׳a*y<:^ !~{.Go{H^A?'բVt^T"38aa<[Y |q\ɅḰD*B>Ӡ!&vAzxFձT#s h.B/%o xuE ɦҕP.^0+)ɜAxo2p83Sv9gL!X=/MƟ1\hq9 h$\; ZCbl.* 6SX??hwM4(Y)@!fFnx$,!la3L]&Ǚq \oJRrĨhPTF pXyG/@䉜H@rȫZe- !*+lo>,4mOU͜&RhLa£wUnrEQ%1r!8 ~%\Y[x}^m:Ood-Is ~+3;őX}󌖝)xϧUtN}|;kmpTͻ2aD!>s >7iMvR{x-YdOrӗ{b\lۨӰ/^286 (皴-&Χ$%*lw/W'a#SV 2Gz2V;$l< ~,T|ܻޕ}t")'E')*B*UjRL.Z8B>u@ 4#SrZL9EWhUG{/DY|*(,ФiRoιz, '4)DcRX[S&Iy#kʘ<LxKPDh2S+ˆ6 Du!WyLdS{YK?%}[2bnsr)WWC6 x+T'OoS}qs7Vr4}z~l&qsW6SgLe]~XUevr Ά+c,hyf%{VÀ8&^VTVӔY1ccPI58vZi$:FMKPj&R.R[41B'!r!Dѫ+SMWxFH%K`@Qb $ 9zjjcI,Y'vy6;4[o@["] ?v8FVbUֈSm:v>mЉcaêUBDv9]d"eoSK]ē fw]voGNd,( KoܧnN2lX͠-ѧ23}:,٤jPAMڝ[C6LkJ[dctv [vWQV i734%]le*=gSrG:TzO)NΦ Z_ UY8b f^Q=NR}`Pe HkX\kAܬ)O| D0)sנ yMH*?XPW eǽ 5E\8|tDžM1lM=`Fpȹ Ⱦߓ6ⵆ}6)+LC4wB@ԏ"|v)$MarM:<Iy~#MhA-JFh4+"By{ϥ` Wŭo<"L geb6%|T2faH<\j@u/Q:G[hnoikš﹫ϊD(, M< 9AK~H ECHS&ePcIad!)0ڎu_ T;FQ2VD' q%?92nTQSa`jYdo9ȡНZm-3ɱdjШJC`A5u{Ē>Ye8ͯtϲ:m'[5=͊SPf!_:QFt?ߝ+}Yv*Ö,DHZ|ξ&zpuKd)vxS\Ηه*®-|Wz.OZzJଷN)Z7`LVK0 -*:Mb/B7-05`rQ/ż&b-5Q$-4}*~'"{gbH4%۵ ⣓ H,q3`0":7g_:( ƶť*vLSPW͖1ڛ'ho,̄.Y2 @]ʯ{:4 ;}p}:N^L=䜵U^)ɶkjzm Dw-z}~55tK(OqdcD=Q*Bcã1SBz=%A7v~r,<"PHl}U>Su^a͠!wv (5錱d, uuDuܤ }dp_eFA %?5yiN=f^vΞx/-ҽ|jjY,J/ӬCZ,h&3yڜAH*ƫS#&RzP 9ؘ'hPD1X9|]zou'Q;{Bkg_kTxVI@GvUOij,.d1F7N~덣\ +L` #xQVdfx^-J5fr 7)}G^}NT33Fy3hmU!76xUpVIf *?W.`(]h]e0p(۪iܗsH7|HbSX5YV([mv5U&3[ S5P8tq"&̨P'R#s:=aT*ۄt+U--HO}䛔w j/ <ԂdX'].V(AOskY0+ UeY@!vi%~ As(e%ci/+ yE*iY /liKj*Dxd l@zԓ I<#+XeMf|xacT.p{,~b9:?-X#~jouePӰ0J:qI"!qbD?&T ;m,sߥBqIaNxg=Az9ʈ?z8,}pZF_ǥѼv_"_0<Ku}vtjL&tY1-LZ?+ZȬ$՝k3[KfGJήv%&P QoO8b}ANw)%gys|IX{AwoZ2{y.1-ss9uPAi\ QYIXn#77r _ +4;SR3U.0CJr%_J x~N2yT҂Q7)V31Ŕ7v;vb 5RbƢ{ϻ Ϝ}#yaFy0&B-hԄfݭv.ṔlHW_4togmmKM*zXOxDUH ,וDLZ)!Kh^f0YcgqBp'ĘՙΏFv d0OX8tR޾mS/*9gOjX'V ȜLNGꧪ]y5rt/dbqMlӺkҾ7lY<}g+M|!sur9%H{Zsq]O@#g?Z2$@Cku%zePtD AT}R,FĞ #]J?B SLa?krv%*9~O#x(@r ]n8BIV|!+Hw Ti;,2氺~bsA 
%V+r9{HIo <]xi u,X끋3t 0{8[ e3ǎt@/#K/"Vd+GEWhe*Rô)\NfeZ$P( &i]}鈍%\_5AE(LARPN0_vLvZAvK1G>3oe|Fʥy7˸PBX臵w +)rc]9c**g/ɫp9eYK< c?h.o!biǥ>X8:Z%TPO;IK TwevCBO4C㧵^[ۍB^<GHHzyC/ҷڴ@@Nά)<3ׂHrڛ=tJ<sQڪl/1a|sTOy_@N,eZv ݇lIq}fm6YrM<\`?(nVF&? ^ƛ׳JhO&r  *,yrЬy١|gՑȑ[I`T3¶?< V`v| xqF:)jīҫhQaUh)Ȭ~oYbO W.q"lڤo 9n %|k%30IM9׹)݁&/QH)Z^{0ui[|F@2°뺸'f v 53H7YIm&^5]5T\ ܿOFۏ7f[ ^ŋEƕuZu6/J[ YE 7բZ@$4"Sopd>kkSL;#߂.)ܦȦVMzI]P'o1568E xB7$Ҹ´i"4Es!Q`#UM3x~1nG\Poo],gzm!кhyץ2jw #%_.`&b+tMi\ 9%=b QI@;$Edܽ{}$`ǂMj`_9NfDtA0}1_PˤζO,J$~u;FԻQc,ېR M7}VGX(UQeL( /:v%2!MWI-#2S"NJRI4,d0"=RRw\Ԫ:xt^X8GGJfP]Trv>ÔYIJ 'Y/o+cy(J|kSvy 񢿫nBl A)OaEBZ㹉A^G[Y' ?.ddE &CbK襨V4e"Eb9=@2zc̕R>]J ٘f]X*_2Zg'Wfm?,]q_xe!|L ~vKg(Ig7fJL2ٝ|^ 9]% cԋKۧ+p<~OQCӊ%_m8N cPfUxudX 3&12c@߬O=<[*'{*N!OLL?I/4'Zm=,<`C\/(nsJK>ܗur{ ɐ$[ HF=$zOq"pHF@Mꔴ-"b\1M NrԦqVuCve 0߂{w~f#BjB?A׳iƭѽ9/vyWȭ ; T@jRwW}"+ccb"8z u'_xvrD.Kp2 'A{19RH=%|ca05=;/z%"Yl̜SU?d_qnF;͚&N5 oVxu4m&); W'4{{FAe2ni|X:>0{=F$I*&Np{-JpH9}&o6\6jQdtt |oXLAQ6?ڃW)*W5GPKNS|N+ ,@x=ZEӨ__wRTc ah:_<(g" ]CƓE_\}(@QUT?Bp'lAU-}V5*[ 6P+FLx tw{r)6Uuf?4b2ޤ틤o?X7r n Rӏ[_^[aeBUq(Nu&u:0BP5-&_] \io 쐈Ij|ߞ젂zё !hTB^R]Gճ՞K٘U& NR,{+.m#q<^X|g#wd.M0?K~C^wU2 ۟ysuU5Mn iּ=ϕ݆70@*NQ=qgB:]HEfiey_\F"Ų~ZSg~LD+,cO"8d͌_5HJ䙴*0XN=4^{Gg]jIu#SȞjgT$ pPL'8SOJ%|t i^B! $f3wDfޅ2Vu=+>:#(Pc'+R` \ƚg*v㉦ՓhZq-ަe䢼T.1q0~luEgQA +?!)9WJ:,V䩐pC >FOtBl^*moTAi`(R(p7H2(`wsD~7sV9Z`](%--Bc= uQEV+?V;/U!Qlm{OX|-,D2''JzR=T7g%"n^OrCUqX3; !G,&h)U.#y!~Q`i-K@xW&yJdЊ0GtɱcFe2KT^idӰQafoXjw)L*8yQq ^[8#G+*$cwB~kǕ:/)WNZ[]ɉ{%m8UjX,1O 0@zEhY-H ^ 10 3gK1{ÕYB=7('"NXw,$wLbyKD}Hy A؉J -Z^BFUL*bczLgZI3S?1<"p3߾M+~NjJ?=tN^gjvj̤JH7ff$K/ʺTwrQ{lh@'u3Pw70F1:E/uac] AU|ơ s3~;g|XFqWhSW#33GO;c/']r\fYQ{Fh!V=NBXas7=e==7˴|N%9֘u9z6cY&b#_{֠^5&aLvo[P湑;C"NQjXBp-߽GoEoڄm7t.(Ezkb`-y/jEbOIݹQTL֥X󊈖[/I<+wG-s:dYG IWkՀM_ #^I,@M6FoD/~|w61ơD}<:5fN34)Ѳv Ba-'F"oSRBZF ?#y}PF]rW-mWV>V*&ȇM4Af]f@k̅͌v7WygEDIl4k=ޥab݌ 5a_CB=i/-O0ߙO!Ubi'\k+X,Ylw!5}n(/!] A<6Fjl`jw-jL}v-U)J.v -bͺQJ|U_ױ6/%A~\-WWhgτB%,X7h0Ҋq@wPͥnK1.bIaRd)DG}$[kT:6usX2FMx-#pc XdvehGCElȇX3Ty;ZҢ@>F?NM ˍd),i ¥(jf2`*扅Qɝ;5#Ϳd,^ٿoO~|^MviFr T 礮ziKk49?+scw7FxVC\h<׃Lq\3q iFrZ"¬  y1usM2J#T-޿‰CADRZq,*6)X[q;. 17T; Gl9T3mzB-s4 ߽w<۝ xx̸rfYZ%bAº?D2Ȯw4&wagW&eXWty `^='o'p;N6qv(]T! HAߊoߏA.lսs_Iύ:9#G?4cú'ﻭ)MxXuE?R-hJ- (_I7mc(R杁x+rpY \_+#FJs?O]ڢY@ш7W5cdQ 9/|XQ:zs@)/o}LOL١zʠb]EK]ЎsGoC)N_c#ImkCll@5_: H ʪ6nY p9)w:pFkdv1^.#Âo$#.<8_׼0Q^IzQߛ9\Α@ W?xK0. 0j4H-~S*.)o]E.ɳ1̷r6P~!͈09zGQ>UW>W384#MD N:H$<}Noz8Ke#~zu @pOe:T%pЦDPE]ժRo`{>roɎlefcD,ML:|_=t+k?=(D[U6-Oן-mV&$5a\//X>8M%g+UDoRi_ v~3lNRXi (C91a+tW5N83j';l?YS\&;7%N;(}WoSz9ڦ+P.6X/aog6#]:!OUc6Zs yɴlaS1^j@gj,wӥwqoe-lme~f*J݀85"''3ȼy:Э{2CL*~ӚZW ^H)]H|먪rMuO&2ڨFra]{.w12׷|E>*܍wy^еG^dn9 җ t675Lzt6b6MPy|e$__|,L IC?tL @Fr0+eZJFGR#ǖ޼/Lj'H ķL0Sv !2R<0[+ ƺ\Τ[\SFurif W㪂wI8-2P;b^ @0MERD[)Ug在qjuo.øg۩*<ʊx/xQ<_QEcp$?ž3Oe!=bl#CDʻT"_n:* _R-~х6t߽ (ge/Ƀ.r} \b.ahha볺Z m=DgO]\h*8xP\=+E![*GJ=~WV\1 _QMn@!aA~Uw#K Ik%3溬?ys81۠v4=GVAv>B}Wd~y|42ܟ.y@^t4?38ō&Ȇ h`I,u/Vf4,x'LA_Nz6I| 7g_AȎH;q2UiYDz*::C#A[L\5CZE'NPǢsHxP°NDTi|eOҨs~-%aC Uc+<2/tFL:Yhff#xgXE%Mp8s:~}:͜$qGw nPGYBwbC~?jS&h㝄RSr}{AtRQ|)5 9)4G<ڃ/v%jȆP*>T:}5) ipL&{}I~ʋ{pN0Ob4% ƳA8M{wsUKآ8D![stTlG6fLTCnEN""zJ }'-}o3䴏,yyTaВ.Z:yD7j@Cct.7uԶW.WykN^;/K-c+n*$h:z YIZpZ̀>Oi@qEnOVTQթ~qv V3*"lHC[-,*?[\sorltheN9;q$sG|Fk$p("d+Ml HENf&%D Up%dH BPڃ5wdnKfۉeP8gj΀GX G릸EI:~|W]gXl~T 7Dە)K[q\JtȐJ:=؃MA̧BYM _kψUYhO jAȶJr86cKe!?fM)u$wb)IS_u'+ȑҶ(UN #I0P-kcqxZ漙LN6-$Q}Gˠ"8> +dOWrSᗵD'RZEK=2J|H]&uGiw(܊GGȴe'䐰9^SvF+Ǒr_"Sw( Ɇaڄ d(i<CGD%lbv$f)`G(@]ɺyOpSN%gsM40^7}lTqysG`6&|>ͥCÂrgwMe50]3@b-GjYRENp#]mŅWDQl.j|&تArX(s'^Z{<6X]PХkt]c7Cޢ.qDv o{N$f*Mn8mڈs4lE#mh'SlHc~fn<ŶytNZ) 0L+%G֨쀺IuFO9MO{FPhSF^;6ɍ`}Ə^;y 1ObknȞ)1ڹow D( }z1aE.X'1V:9]2?=. 
˝D6ra!]tWl@yՉg-5) pOm;0Ń]"~ʖf+.ez 8fYHfb.h7Ii]+Us=l#{r,c$eh(O%HCwrȸ%I4QU\hHeC94L5rQW'%@t&<N3,䆋}ɿo PjF;%HЉq1;Q=m-HMSHFQ˭Qbi-#Ūf[5^ G)@:R쐒h ]4"h_S<ҍ%F |/&׮`xes]zP?/ fXZ$l/jqpP5EKyG3P|AT!:D^]MgOA9C } U=tU-Rm_:բyhh&NjNDCzBvys bTW/?7qe42u;?g%)Zؾanzظ-9̱!Jrp<[/EhӔֈ˭+^0ՊQ$71M 8_c^G e^;cN-__`i3u}e*\= ]`Bb WЬP;a&)'tV*ey#+ 4;9C/7Qoۜyh1w?5QP V1Y"}7>S:¦I$Sf{]BWcZKIC}KgN<#8TRHB}p,R'6e qx%iܐG&/ r 1Av' k˪'k 윇ӛIޛI$0Ӹfk%x1dr:VR)HPD,Xz#SmNS/ lWD\XxX e,7J28n ;U2T'3ԡY@:Z+~ %)}'B~\+5'W}<>yg|W%MQ^,,I=Md!wtu!EVcbvkޅ'b$%[YH1b V+~ yc`WIrc}p14V@}T7 MlI:IEM@ȷzԬ ΀,ECY{Y{zxi FyLd)Y8՘ X\La\h|pFN*Z_"KE8'3EVJXbOFYrI܀W[(4+B{`8mTOΠHe܁gV~õwu74,j'l%4]aʹ3p+axƆ^"օ[0:wRI,7 / S q~P.h),͙zmץ" / SO0nJc~W6Ls%Ơ 8CSA$nj6p9)~ }U\ʇwoR2 7kK"kzo,#ŷs)cL cp 4z9;汯)6J10GnAA?]_jh$й5hBQ!}S1M*-#iv(Ӌ̓;`kHp%Mo{ _Ya'@(OxnlSL;"7tj-L~3 w;$+d}InN" ,.WD(M rh-(dz@4b΂&s[jLdLϏƘBu :hBS0~ ]t#5U#mobGGvM bJdtZM+UEr|^~lZʂ\ўHU9ʬbFI&[ }q#()z[h_Pp"BA-lRF!a`Kҳv28\8ih7<[{ꪩwU5N@iFWv$/w>ҷo vl 7d! {9A~=kEJ Q:=FlgŇ< "):@%[V/ ޔ;pMꚚe>pm&Y nGUnő&S"‡28Jq,הlF H?im '7* :zcPx "5DLU/6D hiݠ-->m!J\ ц}z[|f=pt c+i4vRBEP'0p)OnԌm,[L#|q''$䢬d'< 4" Z~H?s+5,|kF:>Ŧǜnkji L{hѦ1{l)A/bP@e+9eR.qxJX͢c CW*2[>`A(05N~v;*6<6Ah B8V/! ;]Wjbʊa%s\J̰& l0oze ^( f@ZyiCItpP mjM7<9N%*j7mSV;Y ɤn*Ȏڽr$w'[ngUP.} No44‡7o\P&, D`lOUh zx2cK.tpY*e}EQ$VK"1QEF!oĽID8:uIfoh& 7: 9edz -ѓKRH%9;&5Rj`6 & Rdtkֳ_Se*1:uəN*>guIhzel7l;`i|@X+r*ʀ)V͞W \qrq)̀3BHD5w\j[ b}䉮 r[:_w7m"4wڟ3s9Xh_f.-!4lNR§e㿹usȖ0Po A8j/h<{\ļ8D.GKZsBBj?n%Uy/nw54ّ=71ف6dM@ExbNXJB!Bu CjPkӕKפgFVGRӐ0%YgB">!}B‹LJ? T4?M?<`,acjX Ȫ"+~^ ހ0 t$Ȇ<°C`I#,?H+- lA{Hre1U&I}$tlbg R>1 ] H'3,Ca|Ss*? ]2pq_ȨN^cG4zG5qNTq.Fp|)i\تCY+%ɬ\~+R5 :%pz =;f+ U`6v`#7+b?.[3힫}fVMn~0n)3/Ot|wrgP;XK"Dj^ ŕ[X9/ҳ|j_J^ GkJaWjX d{\CAȸ>%kDBFV:˴oZ8N쌝ht,c.%8Zf><ܢBBW`x9(,I0)mr04Ȱlj(ȷ(!ӝ,u yvT9+pIoIz?0zM-J*XI&{Ga&S7 򁛔6׋%oo灧Uq ԫX[Rl70싵hV*㝻lȘqkتUPӚ!qEnilfɣfGsLqd=;y.M洧5'Pzj{ XH͢$אт%tMʱoŠ26( n%\x&)=SuOղ~UGOh=kgERvoЃ0U3353!P]=v%V_^lIЭ`dޗ0͏[_ꏞϓj{ x q, [=9"@6@z8= q_;nQ.rQ9t'8@|k{>cvJʕŞ+"_8 * ka=xjH[>;7!K .k@{ z: ƇQ6RIBR\;) ht76 #[e [xpyhFH 2!gegTk[!ED}1;>ށlZ=$W{2'8=GƻJk^v:b44Jb$wꌖ'"gD\mOk5!8{}hDqŘD\mIOjܓLY88˛Z}.y_Hgnp w&aLF稇Sۑ':`.f:ƌ"2C#Ri; "OPf2GBhk[%RIB4R3u\Ni#"82=X[F՞ɢ/1 ![GXSE‡T: Q.&kAOk`J,1X"VN_|+2jUS}-6k->'?tqJpY*&ݟIW+vzJ| x#IQz N϶ 7{#CP Tqs/2Fbnznk -cB0{S_AM&Ջ0-다#ӻc|ijLv/ cU*"_Mц|>MtSs$ @J 篢@p 천7 w(HoK":he(. C~nz7DžȪPs$;˚sĞ*,: zW^MË ^;|6V}]*k&I6ļ_ϼ ,14~(nF@s6SIl9)%:Y,be#93!MvM5W ^K*gq⍱9\C }1qm+~(ƣhN=0E}\V㬎-]#Z5/dF<1)eK~;z_6u|̦̓DP:&$Q Gzo^ z;͖-{F<\ 2-hnApWN,2;b7L϶t [H7;Y5 !A>*;"7SW1J׻>1o197 xCK%8Krcܯz: e;N sנRxȐPߦm5qUr4tpu$9Ζndz{0+|9pgVؚDQ3FoVdi! %g<'KhKGtujǟG~QHoq;G-mxDZ"oUB;x'Q\J3E&&auӥ5:EF{ȱ^Зn ==(Ħ\l?Gs1D1,=HŐGϻT݆y!UKLΜURg]>>!\&8A)ka(0o} ɿdɱo@;v9咐"V( ~BMu`->e&;w,U},Osc%e"'F &?!)0j_tµeFz{.Mq%a~(A#䵪 > eJt9'2/T~_I* uiho p5k:[~WĿCE> ;kaD?&<l%Hdj),p&XRУ<\`z &lF1[-E9 z=PPRsBk.Po69Sª PeoTj'\d;-[)04y*Ҁ[!-IR<"v+o碅(׈itu~rs''Ǥ.lb)ƴg~PT?v$-LUXW+yW_I6&(ۏ_-nX}ĤEH/YN{`5-RTo?n2~8-孇!6YFwi ;,½ȴ Ed4RSZznּtLE=4`%Oe/hDܠ޿p(7^:ڿ$ µY<@%f""6Η-ϡTs8lEF4Jaf$oD~s&!t~\s0dҩByT=sF2,&!/XNwlXzjr\IP:77Wc03VݹEu//IOjĆ!Qgxޮ];:2cD:ݭ)C=[R̵J. bÁ{e4ho:ܻ82,Mҍ~@. 
^/}^neA+sW\zJ3r5( ;⮿*hwLQ#]S!.fޢT"nx^$Z<+l[TSk=w kZwzXKB>j~e!L΍ioN@V6U$Ls9iv%a?P+`#a(eJT ݷVoi|QKݹ])02 !RSն4*=1bh3I*Yxwʮ7q#%Ƃw5i=R8g,сcu\Ym^K4Sy4 \c\m6]vGÇ k6 3QGF}蓼Z"q_·!h ^>6.ZÍC wl -Kn9EEsEUU]jzㅛGSYMT@d!K\rP![*jKBW7Q&O7;lXaM@)*/͎L`v>%׽@ܰܗJ@ s7xeYFSmuZ]LN 7OƱNdۛHfp?@U.8ꥭGͭvD.%VM'aUT,~L3hyOxp;چV S8sZsL!&9ٽ&Mb.gFU{kx{o7Mj压?ԅyIPpR8׈qGfӫ31m];2ħ 'Al+hb^^:9jeru} h@a Fr;P1Qځ>c|D0wMqZu@gGxIG ^;ixc\ fU#/yCƔ6҇]zҫ6wqag0gDӴ-M\>&hNBP;qF'PzL3#%`"xi=&[ _Y]$DbA!ж{C\}RkA02N !_I&M'C؈DP!|8*!!{tQXST&"#v] îk>ˇݲJcS\UH[* Lm,ߩH*,x IETRzGӁ#p)1`0D{֭B DN@*1ǀv~o;#ġ;B$7aڬlNs#MƉ:cP!M+`< c]t`c{I;kQCZ]#LY0:YtXv&~ d^g>7P3COM@crNuDև8L dcT]m J;<үKmv R_~ʲy IP=)#M4KF)Z9d(J&Wp`iuZ$( ^lMdlIQ!p0OǃWY*֖2V4?Bp~$XxrA\DwhVx50>&UFb9+'H?℩+ͻ^]?3f&}=)g2ndlsx)dr&qNf/<ޠ{P-Z9Dؗړ  +yn߃PYg9 b.,۹4&\Gvxo7}AWe0SxVaj81`q8ӧљ<:p{7ႷVyk>lcJhUȚo*(.'K5ͭ`ɯJWQQl}fa,uw530,F>c*sc@ʠ WZ ;)6 /~ļ ?!MSOSi48Bp>-@Jzˡj.>jճ"?7mL]ZzQn`e+ ġm^._s6jYݤ Tr&^pikG{+rNi\Lɨӟ$V.y |dL~K(|e %ԽS=t]ʫ9ETrA4s)]/ؽھb{s ) *~l'@̱_ ղ,a6lq"~Mh[*2jہG' R z7?\}Ե9ZVrsk{IŘy_JrgcO ((.-qBu}9A!=ǵ6sPO8#rw]_~@uzԂ s~wphocJc$eE枟 K>q@t2gCfcoqY'.r`@߽a0_lKfeJwŹAR0i4:Nz7ʎ-P&D|PѴZP8/M6u]| +UJ&hfi^Zى42ekNj¨J <Ŝ&rɜ?>Eޓ5cV48Zm8[SH>&k^/0HY,,}Sb1m7z H0ˏIBFɾAYm9[j+y-6XuҼӝlS)xTIb.1#s$rǪn߉G;>v`beJ4KO .8u,S0y|pV &0Oم`}_XbkCz?؈bXh<2 ĕ"6?.NomLV T %*d=]%ᮻ]OcIq1$OePvaMdq@@;ID:4JfTa]>z#>Zd.S I"E؝\Rw%D /.hh<2?5L/qVM8O:MPޓPѸt8>YJE3.AAU|1JgʹZ[u,U+Ci-D F;`/ADtBŃ/H7Ly,s|HU!X2&-m%ku^QuM\Wy۟h\͡ \[".91q1pB + eabF)"5<:Mm.J؍pә} 9 ǟO!%)nwK*F҉LZ[; )|ٟC^^q~2AZ+0y kz5T0%3Q8mPT"ۅ6I:C8sBJÄ]۪df4S dPWE\wE?Bэ3<`h"cB0YE>~Ze.SBj]pӪ=^A"*nVja+J R!* WE"pO3㽅h2 RbGK{߿ȴu[ M_]Bp@@mL8p߷v+?6Rq`91@AƝgK$ !>KzwM?P Y,,L~+$oSP+ޘmmuqhfat1okeQ-]9=0CZk5t~Y̦D£qE Qrpǭwy 1-٫Þ/9K2y&?/BpB[p+i%ÿϾ+h4o,1rh:T9%nBI^?Updͤ}bUPJ9x \oC:F:K=/KɅՖ͆7rtП809"mhvyV&|8 %~#ơLK>i`jRȝc qm!YU ] ,յ䂯=G$Ί\~t~=D/PhI ,}=KP?#hY&{UVd,rm*D8,u|-'T:Pt,HMcв4^Q'\fQl]w. ~Ml;I#RT؀9^BX=$V\,"<ǃ~CЭU|~RVJqtOCZ*}_ Թ tE`w|M,uJ}_*.pS0ߛ7NJ kx%̞oO՝|{&zvYY8~d]h,7jOOf~$FM+@W.]%l y8["L0ugx-~Nvgoلl05MɐLKdu?jydB(|S=u8^:|_oX"I}?$b4rXSoE,.S#&:BEy$LuӞo:[63DKK'KKWFv$= g^Ċ%F#Usi +X{cy7TPݴG@6JUsecTOV}`a~Ǡ;PL블D%*gd'buTLޯf@T3MdX9A셢Fg!ϋ0FBU )HN%劳մ9w3?&>:b2:aj}3\gҀGb\ !:vχz46m#RfJI.OkգGhʐ[&sT`мlyޥcTuLCi/ T|9ϗ˿F6&'EbyֲZ-dgǭ㰿8vE+LD*"/|"[OD8 O}PnQZS.Y-IZFc7O:{6ʩ(" I3Σ\Q5hŤf1c܍~L3eja1ߛ,A\W%Z=.}%*l@0DEYM0Q ~ J>ҕ(07JMextUոO4rhAm{˦љw'R5O/ƭ)l{(|f\'N޾|t\oo1BNz2V-dn)8|I7x1ݹP m3=Ɨz(N>]?Jf썥SE^bb ص1Lp'/̍6E+%= EPfѼb{QeT}Z%-;‹UlT/Ԟ0+Q RV_'6n?SC`אɕr?T ')gHĺcmHT9 EսEaQ0T:h[ZarȮW> ! %~"Q4!\!;Qk !H}㖷?Mkͫ1#Ȁ.̿,9-b_ F‚ vDQs'L%B**(iᎼ]fkY"-"?׿/S>/#c`8!QV/˓ZW=Ħ5~ u>Orv㽖7*qܖ*McO@ KY:ե^GDž@G*t-G~=%guDE?@5暏'4V|-10_\CJnVO}}M59w/>ӠvW]wiD<+Gx 0v+)!yʏ/ó"{6p;z]D# =/$ޡX'a؄$VEyF%r^oŧVi@._#8tbfLh^,PmDν$5:^fw0 IR*5TcQE.*@*=6qs]lkm',]rfd%-tC}B3UH;ljw:v'ֶc0(9gx ._sk*?&&x5 (Qʾ/gFǤX2[бR6?)Ujώș;' ]m6Zx_%3S,N.-0U/ɔ,4U9''d8[EvA!"C&# l-&U8^?_Mlmd-4NtD%p9# [&YBx81-/"^dT@ぴߍ3ժ!AsW9V )_Wd%AK'r?O+/.YO_ʣ{\NʽiPwٺάGYpxJT٨#2EY޻K/>ųi 7H7,>hg`L͐ޮS] -@sfZmM9r9 h~v &\4kݱTY\C#p͂IgM-7^MUֶ77cƸ ƚm y'Xmj+I2#3HI5s'[}KȳeUMN24JZobn\#W>YBO#F"-"'H߾T74J]9E{o3O'S'6:.o4٬Bn: - r&:j>ě[WS5˖>ie֍l:QYM[RNaP|5II/SF\%zNȟo(Cu>7&Ş>39NWIY4xRq_DY<1+͜67|qed!iO?a 𓰻EJ>G=*v 7{нw1-O)蠎wW5&6%f B[&.fe-:2m|qՂfhk-6+:j\N:˘y PK͐MJ܏0 n̤3kn}.Ҥ*Rg~};:Ă%y|&aX^i[L~3G?!EB#i{iI nk%xen.UvMK|4gk0$ K9kqtmY_:s{:xW\gUvI -eTk3X K7L`Bn>}ekHQ ?{ lI|1t6[ŹΎS{p?_un yGn|?ᔂhswMy:բ<,hWWg B6 K.5< JQEJji5| T=4p]-,? ,tBa?0fLq? JKjf2+GWYy%7A|5:p%f;M"jHi*OMn%'Q{ҡJ)Ll `sxC9Ohe䳢>Ht  ~(Pmֵ2m!F"iiFvݙf6Itˋ*R $t4^ .7XALj4֯YQXm˧>3k25 pS`3sE :7\Q=2G p){RȬ䎒ӋǪZ 6kg |Z`$RP֊}AlYL] Zz+G7+Fl%rQqRSz(1n_vU Qm ~wjs ykJ?m'$u}a? 
[binary payload of kernlab/data/income.rda continues; omitted]
kernlab/data/reuters.rda0000644000175100001440000003773113562451345015013 0ustar hornikusers [binary RDA payload omitted]
kernlab/data/spirals.rda0000644000175100001440000001115413562451346014777 0ustar hornikusers [binary RDA payload omitted; a garbled stretch of the archive follows, and the readable text resumes part-way through the \arguments section of the ksvm man page (kernlab/man/ksvm.Rd)]
\item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the accuracy rate for classification and the Mean Squared Error for regression} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: \code{TRUE})} \item{\dots}{additional parameters for the low level fitting function} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found.
The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} } \value{ An S4 object of class \code{"ksvm"} containing the fitted model. Accessor functions can be used to access the slots of the object (see examples) which include: \item{alpha}{The resulting support vectors (alpha vector), possibly scaled.} \item{alphaindex}{The index of the resulting support vectors in the data matrix. Note that this index refers to the pre-processed data (after the possible effect of \code{na.omit} and \code{subset})} \item{coef}{The corresponding coefficients times the training labels.} \item{b}{The negative intercept.} \item{nSV}{The number of Support Vectors} \item{obj}{The value of the objective function. In case of one-against-one classification this is a vector of values} \item{error}{Training error} \item{cross}{Cross validation error (when cross > 0)} \item{prob.model}{Contains the width of the Laplacian fitted on the residuals in case of regression, or the parameters of the sigmoid fitted on the decision values in case of classification.} } \details{ \code{ksvm} uses John Platt's SMO algorithm for solving the SVM QP problem in most SVM formulations. On the \code{spoc-svc}, \code{kbb-svc}, \code{C-bsvc} and \code{eps-bsvr} formulations a chunking algorithm based on the TRON QP solver is used. \cr For multiclass-classification with \eqn{k} classes, \eqn{k > 2}, \code{ksvm} uses the `one-against-one'-approach, in which \eqn{k(k-1)/2} binary classifiers are trained; the appropriate class is found by a voting scheme. The \code{spoc-svc} and the \code{kbb-svc} formulations deal with the multiclass-classification problems by solving a single quadratic problem involving all the classes.\cr If the predictor variables include factors, the formula interface must be used to get a correct model matrix. \cr In classification, when \code{prob.model} is \code{TRUE}, a 3-fold cross validation is performed on the data and a sigmoid function is fitted on the resulting decision values \eqn{f}. The data can be passed to the \code{ksvm} function in a \code{matrix} or a \code{data.frame}; in addition, \code{ksvm} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used.\cr The \code{plot} function for binary classification \code{ksvm} objects displays a contour plot of the decision values with the corresponding support vectors highlighted.\cr The predict function can return class probabilities for classification problems by setting the \code{type} parameter to "probabilities". \cr The problem of model selection is partially addressed by an empirical observation for the RBF kernels (Gaussian, Laplace) where the optimal values of the \eqn{sigma} width parameter are shown to lie between the 0.1 and 0.9 quantile of the \eqn{\|x - x'\|} statistics. When using an RBF kernel and setting \code{kpar} to "automatic", \code{ksvm} uses the \code{sigest} function to estimate the quantiles and uses the median of the values.
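As a brief illustration (a sketch only, using the \code{spamtrain} set constructed in the examples below), the automatic heuristic and a rough manual equivalent look as follows:
\preformatted{
## let ksvm choose the kernel width via the sigest heuristic
filter <- ksvm(type ~ ., data = spamtrain, kernel = "rbfdot",
               kpar = "automatic", C = 5)

## roughly the same choice made by hand: sigest returns three
## quantile estimates of the inverse width, so take the middle one
sig <- sigest(type ~ ., data = spamtrain)[2]
filter <- ksvm(type ~ ., data = spamtrain, kernel = "rbfdot",
               kpar = list(sigma = sig), C = 5)
}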
} \note{Data is scaled internally by default, usually yielding better results.} \references{ \itemize{ \item Chang Chih-Chung, Lin Chih-Jen\cr \emph{LIBSVM: a library for Support Vector Machines}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm} \item Chih-Wei Hsu, Chih-Jen Lin\cr \emph{BSVM} \url{http://www.csie.ntu.edu.tw/~cjlin/bsvm/} \item J. Platt\cr \emph{Probabilistic outputs for support vector machines and comparison to regularized likelihood methods} \cr Advances in Large Margin Classifiers, A. Smola, P. Bartlett, B. Schoelkopf and D. Schuurmans, Eds. Cambridge, MA: MIT Press, 2000.\cr \url{http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.1639} \item H.-T. Lin, C.-J. Lin and R. C. Weng\cr \emph{A note on Platt's probabilistic outputs for support vector machines}\cr \url{http://www.csie.ntu.edu.tw/~htlin/paper/doc/plattprob.pdf} \item C.-W. Hsu and C.-J. Lin \cr \emph{A comparison of methods for multi-class support vector machines}\cr IEEE Transactions on Neural Networks, 13(2002) 415-425.\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz} \item K. Crammer, Y. Singer\cr \emph{On the learnability and design of output codes for multiclass problems}\cr Computational Learning Theory, 35-46, 2000.\cr \url{http://webee.technion.ac.il/people/koby/publications/ecoc-mlj02.pdf} \item J. Weston, C. Watkins\cr \emph{Multi-class support vector machines} \cr In M. Verleysen, Proceedings of ESANN99 Brussels, 1999\cr \url{http://citeseer.ist.psu.edu/8884.html} } } \author{ Alexandros Karatzoglou (SMO optimizers in C++ by Chih-Chung Chang & Chih-Jen Lin)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{predict.ksvm}}, \code{\link{ksvm-class}}, \code{\link{couple}} } \keyword{methods} \keyword{regression} \keyword{nonlinear} \keyword{classif} \keyword{neural} \examples{ ## simple example using the spam data set data(spam) ## create test and training set index <- sample(1:dim(spam)[1]) spamtrain <- spam[index[1:floor(dim(spam)[1]/2)], ] spamtest <- spam[index[((ceiling(dim(spam)[1]/2)) + 1):dim(spam)[1]], ] ## train a support vector machine filter <- ksvm(type~.,data=spamtrain,kernel="rbfdot", kpar=list(sigma=0.05),C=5,cross=3) filter ## predict mail type on the test set mailtype <- predict(filter,spamtest[,-58]) ## Check results table(mailtype,spamtest[,58]) ## Another example with the famous iris data data(iris) ## Create a kernel function using the built-in rbfdot function rbf <- rbfdot(sigma=0.1) rbf ## train a bound constraint support vector machine irismodel <- ksvm(Species~.,data=iris,type="C-bsvc", kernel=rbf,C=10,prob.model=TRUE) irismodel ## get fitted values fitted(irismodel) ## Test on the training set with probabilities as output predict(irismodel, iris[,-5], type="probabilities") ## Demo of the plot function x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) ### Use kernelMatrix K <- as.kernelMatrix(crossprod(t(x))) svp2 <- ksvm(K, y, type="C-svc") svp2 # test data xtest <- rbind(matrix(rnorm(20),,2),matrix(rnorm(20,mean=3),,2)) # test kernel matrix i.e. inner/kernel product of test data with # Support Vectors Ktest <- as.kernelMatrix(crossprod(t(xtest),t(x[SVindex(svp2), ]))) predict(svp2, Ktest)
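## inspect the fitted object through the accessor functions
## listed in the Value section above
nSV(svp2)        # number of support vectors
b(svp2)          # the negative intercept
alphaindex(svp2) # index of the support vectors in the data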
#### Use custom kernel k <- function(x,y) {(sum(x*y) +1)*exp(-0.001*sum((x-y)^2))} class(k) <- "kernel" data(promotergene) ## train svm using custom kernel gene <- ksvm(Class~.,data=promotergene[c(1:20, 80:100),],kernel=k, C=5,cross=5) gene #### Use text with string kernels data(reuters) is(reuters) tsv <- ksvm(reuters,rlabels,kernel="stringdot", kpar=list(length=5),cross=3,C=10) tsv ## regression # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.03) # train support vector machine regm <- ksvm(x,y,epsilon=0.01,kpar=list(sigma=16),cross=3) plot(x,y,type="l") lines(x,predict(regm,x),col="red") } kernlab/man/inchol-class.Rd0000644000175100001440000000315211304023134015321 0ustar hornikusers\name{inchol-class} \docType{class} \alias{inchol-class} \alias{diagresidues} \alias{maxresiduals} \alias{pivots} \alias{diagresidues,inchol-method} \alias{maxresiduals,inchol-method} \alias{pivots,inchol-method} \title{Class "inchol" } \description{The reduced Cholesky decomposition object} \section{Objects from the Class}{Objects can be created by calls of the form \code{new("inchol", ...)} or by calling the \code{inchol} function.} \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} contains the decomposed matrix} \item{\code{pivots}:}{Object of class \code{"vector"} contains the pivots performed} \item{\code{diagresidues}:}{Object of class \code{"vector"} contains the diagonal residues} \item{\code{maxresiduals}:}{Object of class \code{"vector"} contains the maximum residues} } } \section{Extends}{ Class \code{"matrix"}, directly. } \section{Methods}{ \describe{ \item{diagresidues}{\code{signature(object = "inchol")}: returns the diagonal residues} \item{maxresiduals}{\code{signature(object = "inchol")}: returns the maximum residues} \item{pivots}{\code{signature(object = "inchol")}: returns the pivots performed} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{inchol}}, \code{\link{csi-class}}, \code{\link{csi}}} \examples{ data(iris) datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- inchol(datamatrix,kernel=rbf) dim(Z) pivots(Z) diagresidues(Z) maxresiduals(Z) } \keyword{classes} kernlab/man/csi.Rd0000644000175100001440000001231012560414652013530 0ustar hornikusers\name{csi} \docType{methods} \alias{csi} \alias{csi-methods} \alias{csi,matrix-method} \title{Cholesky decomposition with Side Information} \description{ The \code{csi} function in \pkg{kernlab} is an implementation of an incomplete Cholesky decomposition algorithm which exploits side information (e.g., classification labels, regression responses) to compute a low rank decomposition of a kernel matrix from the data. } \usage{ \S4method{csi}{matrix}(x, y, kernel="rbfdot", kpar=list(sigma=0.1), rank, centering = TRUE, kappa = 0.99, delta = 40, tol = 1e-5) } \arguments{ \item{x}{The data matrix indexed by row} \item{y}{the classification labels or regression responses. In classification, \eqn{y} is an \eqn{m \times n} matrix, where \eqn{m} is the number of data points and \eqn{n} the number of classes; \eqn{y_{ij}} is 1 if the \eqn{i}-th point belongs to class \eqn{j}.} \item{kernel}{the kernel function used in training and predicting.
This parameter can be set to any function, of class \code{kernel}, which computes the inner product in feature space between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. } \item{rank}{maximal rank of the computed kernel matrix} \item{centering}{if \code{TRUE} centering is performed (default: TRUE)} \item{kappa}{trade-off between approximation of K and prediction of Y (default: 0.99)} \item{delta}{number of columns of the Cholesky decomposition performed in advance (default: 40)} \item{tol}{minimum gain at each iteration (default: 1e-5)} } \details{An incomplete Cholesky decomposition calculates \eqn{Z}, where \eqn{K = ZZ'}, \eqn{K} being the kernel matrix. Since the rank of a kernel matrix is usually low, \eqn{Z} tends to be smaller than the complete kernel matrix. The decomposed matrix can be used to create memory-efficient kernel-based algorithms without the need to compute and store a complete kernel matrix in memory. \cr \code{csi} uses the class labels or regression responses to compute a more appropriate approximation for the problem at hand, considering the additional information from the response variable. } \value{ An S4 object of class "csi" which is an extension of the class "matrix". The object is the decomposed kernel matrix along with the slots : \item{pivots}{Indices on which pivots were done} \item{diagresidues}{Residuals left on the diagonal} \item{maxresiduals}{Residuals picked for pivoting} \item{predgain}{predicted gain before adding each column} \item{truegain}{actual gain after adding each column} \item{Q}{QR decomposition of the kernel matrix} \item{R}{QR decomposition of the kernel matrix} slots can be accessed either by \code{object@slot} or by accessor functions with the same name (e.g., \code{pivots(object)})} \references{ Francis R. Bach, Michael I.
Jordan\cr \emph{Predictive low-rank decomposition for kernel methods.}\cr Proceedings of the Twenty-second International Conference on Machine Learning (ICML) 2005\cr \url{http://www.di.ens.fr/~fbach/bach_jordan_csi.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by Francis Bach)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{inchol}}, \code{\link{chol}}, \code{\link{csi-class}}} \examples{ data(iris) ## create multidimensional y matrix yind <- t(matrix(1:3,3,150)) ymat <- matrix(0, 150, 3) ymat[yind==as.integer(iris[,5])] <- 1 datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- csi(datamatrix,ymat, kernel=rbf, rank = 30) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{methods} \keyword{algebra} \keyword{array} kernlab/man/kcca-class.Rd0000644000175100001440000000345511304023134014752 0ustar hornikusers\name{kcca-class} \docType{class} \alias{kcca-class} \alias{kcor} \alias{xcoef} \alias{ycoef} %%\alias{yvar} %%\alias{xvar} \alias{kcor,kcca-method} \alias{xcoef,kcca-method} \alias{xvar,kcca-method} \alias{ycoef,kcca-method} \alias{yvar,kcca-method} \title{Class "kcca"} \description{The "kcca" class } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kcca", ...)} or by calling the \code{kcca} function. } \section{Slots}{ \describe{ \item{\code{kcor}:}{Object of class \code{"vector"} describing the correlations} \item{\code{xcoef}:}{Object of class \code{"matrix"} estimated coefficients for the \code{x} variables} \item{\code{ycoef}:}{Object of class \code{"matrix"} estimated coefficients for the \code{y} variables } %% \item{\code{xvar}:}{Object of class \code{"matrix"} holds the %% canonical variates for \code{x}} %% \item{\code{yvar}:}{Object of class \code{"matrix"} holds the %% canonical variates for \code{y}} } } \section{Methods}{ \describe{ \item{kcor}{\code{signature(object = "kcca")}: returns the correlations} \item{xcoef}{\code{signature(object = "kcca")}: returns the estimated coefficients for the \code{x} variables} \item{ycoef}{\code{signature(object = "kcca")}: returns the estimated coefficients for the \code{y} variables } %% \item{xvar}{\code{signature(object = "kcca")}: returns the canonical %% variates for \code{x}} %% \item{yvar}{\code{signature(object = "kcca")}: returns the canonical %% variates for \code{y}} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kcca}}, \code{\link{kpca-class}} } \examples{ ## dummy data x <- matrix(rnorm(30),15) y <- matrix(rnorm(30),15) kcca(x,y,ncomps=2) } \keyword{classes} kernlab/man/spirals.Rd0000644000175100001440000000054311304023134014426 0ustar hornikusers\name{spirals} \alias{spirals} \title{Spirals Dataset} \description{A toy data set representing two spirals with Gaussian noise. The data was created with the \code{mlbench.spirals} function in \code{mlbench}. } \usage{data(spirals)} \format{ A matrix with 300 observations and 2 variables. } \examples{ data(spirals) plot(spirals) } \keyword{datasets} kernlab/man/musk.Rd0000644000175100001440000000257011304023134013722 0ustar hornikusers\name{musk} \alias{musk} \docType{data} \title{Musk data set} \description{ This dataset describes a set of 92 molecules of which 47 are judged by human experts to be musks and the remaining 45 molecules are judged to be non-musks.
} \usage{data(musk)} \format{ A data frame with 476 observations on the following 167 variables. Variables 1-162 are "distance features" along rays. The distances are measured in hundredths of Angstroms. The distances may be negative or positive, since they are actually measured relative to an origin placed along each ray. The origin was defined by a "consensus musk" surface that is no longer used. Hence, any experiments with the data should treat these feature values as lying on an arbitrary continuous scale. In particular, the algorithm should not make any use of the zero point or the sign of each feature value. Variable 163 is the distance of the oxygen atom in the molecule to a designated point in 3-space. This is also called OXY-DIS. Variable 164 is the X-displacement from the designated point. Variable 165 is the Y-displacement from the designated point. Variable 166 is the Z-displacement from the designated point. Class: 0 for non-musk, and 1 for musk } \source{ UCI Machine Learning data repository \cr } \examples{ data(musk) muskm <- ksvm(Class~.,data=musk,kernel="rbfdot",C=1000) muskm } \keyword{datasets} kernlab/man/stringdot.Rd0000644000175100001440000000631111304023134014765 0ustar hornikusers\name{stringdot} \alias{stringdot} \title{String Kernel Functions} \description{ String kernels. } \usage{ stringdot(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE) } \arguments{ \item{length}{The length of the substrings considered} \item{lambda}{The decay factor} \item{type}{Type of string kernel, currently the following kernels are supported : \cr \code{spectrum} the kernel considers only matching substrings of exactly length \eqn{n} (also known as the string kernel). Each such matching substring is given a constant weight. The length parameter in this kernel has to be \eqn{length > 1}.\cr \code{boundrange} this kernel considers only matching substrings of length less than or equal to a given number N. This type of string kernel requires a length parameter \eqn{length > 1}\cr \code{constant} The kernel considers all matching substrings and assigns a constant weight (e.g. 1) to each of them. This \code{constant} kernel does not require any additional parameter.\cr \code{exponential} Exponential Decay kernel where the substring weight decays as the matching substring gets longer. The kernel requires a decay factor \eqn{\lambda > 1}\cr \code{string} essentially identical to the spectrum kernel, only computed in a more conventional way.\cr \code{fullstring} essentially identical to the boundrange kernel, only computed in a more conventional way. \cr } \item{normalized}{normalize string kernel values (default: \code{TRUE})} } \details{ The kernel generating functions are used to initialize a kernel function which calculates the dot (inner) product between two feature vectors in a Hilbert Space. These functions or their function generating names can be passed as a \code{kernel} argument on almost all functions in \pkg{kernlab} (e.g., \code{ksvm}, \code{kpca} etc.). The string kernels calculate similarities between two strings (e.g. texts or sequences) by matching the common substrings in the strings. Different types of string kernels exist and are mainly distinguished by how the matching is performed, i.e. some string kernels count the exact matches of \eqn{n} characters (spectrum kernel) between the strings, others allow gaps (mismatch kernel) etc. } \value{ Returns an S4 object of class \code{stringkernel} which extends the \code{function} class.
The resulting function implements the given kernel, calculating the inner (dot) product between two character vectors. \item{kpar}{a list containing the kernel parameters (hyperparameters) used.} The kernel parameters can be accessed by the \code{kpar} function. } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \note{ The \code{spectrum} and \code{boundrange} kernels are faster and more efficient implementations of the \code{string} and \code{fullstring} kernels, which will still be included in \code{kernlab} for the next two versions. } \seealso{ \code{\link{dots} }, \code{\link{kernelMatrix} }, \code{\link{kernelMult}}, \code{\link{kernelPol}}} \examples{ sk <- stringdot(type="string", length=5) sk } \keyword{symbolmath} kernlab/man/kmmd-class.Rd0000644000175100001440000000415311304023134014775 0ustar hornikusers\name{kmmd-class} \docType{class} \alias{kmmd-class} \alias{kernelf,kmmd-method} \alias{H0,kmmd-method} \alias{AsympH0,kmmd-method} \alias{Radbound,kmmd-method} \alias{Asymbound,kmmd-method} \alias{mmdstats,kmmd-method} \title{Class "kmmd"} \description{The Kernel Maximum Mean Discrepancy object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kmmd", ...)} or by calling the \code{kmmd} function } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{xmatrix}:}{Object of class \code{"kernelMatrix"} containing the data used } \item{H0}{Object of class \code{"logical"} containing the test decision: is H0 rejected (logical)} \item{\code{AsympH0}}{Object of class \code{"logical"} containing the test decision: is H0 rejected according to the asymptotic bound (logical)} \item{\code{mmdstats}}{Object of class \code{"vector"} contains the test statistics (vector of two)} \item{\code{Radbound}}{Object of class \code{"numeric"} contains the Rademacher bound} \item{\code{Asymbound}}{Object of class \code{"numeric"} contains the asymptotic bound} } } \section{Methods}{ \describe{ \item{kernelf}{\code{signature(object = "kmmd")}: returns the kernel function used} \item{H0}{\code{signature(object = "kmmd")}: returns the value of H0 being rejected} \item{AsympH0}{\code{signature(object = "kmmd")}: returns the value of H0 being rejected according to the asymptotic bound} \item{mmdstats}{\code{signature(object = "kmmd")}: returns the values of the mmd statistics} \item{Radbound}{\code{signature(object = "kmmd")}: returns the value of the Rademacher bound} \item{Asymbound}{\code{signature(object = "kmmd")}: returns the value of the asymptotic bound} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kmmd}}, } \examples{ # create data x <- matrix(runif(300),100) y <- matrix(runif(300)+1,100) mmdo <- kmmd(x, y) H0(mmdo) } \keyword{classes} kernlab/man/kpca-class.Rd0000644000175100001440000000455712117363140015001 0ustar hornikusers\name{kpca-class} \docType{class} \alias{kpca-class} \alias{rotated} \alias{eig,kpca-method} \alias{kcall,kpca-method} \alias{kernelf,kpca-method} \alias{pcv,kpca-method} \alias{rotated,kpca-method} \alias{xmatrix,kpca-method} \title{Class "kpca"} \description{ The Kernel Principal Components Analysis class} \section{Objects of class "kpca"}{ Objects can be created by calls of the form \code{new("kpca", ...)} or by calling the \code{kpca} function.
} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding eigenvalues} \item{\code{rotated}:}{Object of class \code{"matrix"} containing the projection of the data on the principal components} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "kpca")}: returns the eigenvalues } \item{kcall}{\code{signature(object = "kpca")}: returns the performed call} \item{kernelf}{\code{signature(object = "kpca")}: returns the used kernel function} \item{pcv}{\code{signature(object = "kpca")}: returns the principal component vectors } \item{predict}{\code{signature(object = "kpca")}: embeds new data } \item{rotated}{\code{signature(object = "kpca")}: returns the projected data} \item{xmatrix}{\code{signature(object = "kpca")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm-class}}, \code{\link{kcca-class}} } \examples{ # another example using the iris data(iris) test <- sample(1:50,20) kpc <- kpca(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2) #print the principal component vectors pcv(kpc) rotated(kpc) kernelf(kpc) eig(kpc) } \keyword{classes} kernlab/man/ipop.Rd0000644000175100001440000000531712560414652013727 0ustar hornikusers\name{ipop} \alias{ipop} \alias{ipop,ANY,matrix-method} \title{Quadratic Programming Solver} \description{ ipop solves the quadratic programming problem :\cr \eqn{\min(c'*x + 1/2 * x' * H * x)}\cr subject to: \cr \eqn{b <= A * x <= b + r}\cr \eqn{l <= x <= u} } \usage{ ipop(c, H, A, b, l, u, r, sigf = 7, maxiter = 40, margin = 0.05, bound = 10, verb = 0) } \arguments{ \item{c}{Vector or one column matrix appearing in the quadratic function} \item{H}{square matrix appearing in the quadratic function, or the decomposed form \eqn{Z} of the \eqn{H} matrix where \eqn{Z} is an \eqn{n \times m} matrix with \eqn{n > m} and \eqn{ZZ' = H}.} \item{A}{Matrix defining the constraints under which we minimize the quadratic function} \item{b}{Vector or one column matrix defining the constraints} \item{l}{Lower bound vector or one column matrix} \item{u}{Upper bound vector or one column matrix} \item{r}{Vector or one column matrix defining constraints} \item{sigf}{Precision (default: 7 significant figures)} \item{maxiter}{Maximum number of iterations} \item{margin}{how close we get to the constraints} \item{bound}{Clipping bound for the variables} \item{verb}{Display convergence information during runtime} } \details{ ipop uses an interior point method to solve the quadratic programming problem. \cr The \eqn{H} matrix can also be provided in the decomposed form \eqn{Z} where \eqn{ZZ' = H}; in that case the Sherman-Morrison-Woodbury formula is used internally.
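For example, a low-rank factor computed with \code{inchol} can stand in for \eqn{H} (a sketch only, reusing the objects built in the example below):
\preformatted{
## Z is an n x m factor with ZZ' approximating the kernel matrix;
## scaling row i of Z by y[i] yields a factor of H = kernelPol(rbf,x,,y),
## so ipop works with the n x m factor instead of the full n x n matrix
Z <- inchol(x, kernel = rbf)
sv2 <- ipop(c, Z * y, A, b, l, u, r)
}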
} \value{ An S4 object with the following slots \item{primal}{Vector containing the primal solution of the quadratic problem} \item{dual}{The dual solution of the problem} \item{how}{Character string describing the type of convergence} all slots can be accessed through accessor functions (see example) } \references{ R. J. Vanderbei\cr \emph{LOQO: An interior point code for quadratic programming}\cr Optimization Methods and Software 11, 451-484, 1999 \cr \url{http://www.princeton.edu/~rvdb/ps/loqo5.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by Alex Smola) \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{solve.QP}, \code{\link{inchol}}, \code{\link{csi}}} \examples{ ## solve the Support Vector Machine optimization problem data(spam) ## sample a scaled part (500 points) of the spam data set m <- 500 set <- sample(1:dim(spam)[1],m) x <- scale(as.matrix(spam[,-58]))[set,] y <- as.integer(spam[set,58]) y[y==2] <- -1 ##set C parameter and kernel C <- 5 rbf <- rbfdot(sigma = 0.1) ## create H matrix etc. H <- kernelPol(rbf,x,,y) c <- matrix(rep(-1,m)) A <- t(y) b <- 0 l <- matrix(rep(0,m)) u <- matrix(rep(C,m)) r <- 0 sv <- ipop(c,H,A,b,l,u,r) sv dual(sv) } \keyword{optimize} kernlab/man/rvm.Rd0000644000175100001440000001565412117366150013571 0ustar hornikusers\name{rvm} \alias{rvm} \alias{rvm-methods} \alias{rvm,formula-method} \alias{rvm,list-method} \alias{rvm,vector-method} \alias{rvm,kernelMatrix-method} \alias{rvm,matrix-method} \alias{show,rvm-method} \alias{predict,rvm-method} \alias{coef,rvm-method} \title{Relevance Vector Machine} \description{ The Relevance Vector Machine is a Bayesian model for regression and classification of identical functional form to the support vector machine. The \code{rvm} function currently supports only regression. } \usage{ \S4method{rvm}{formula}(x, data=NULL, ..., subset, na.action = na.omit) \S4method{rvm}{vector}(x, ...) \S4method{rvm}{matrix}(x, y, type="regression", kernel="rbfdot", kpar="automatic", alpha= ncol(as.matrix(x)), var=0.1, var.fix=FALSE, iterations=100, verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... , subset, na.action = na.omit) \S4method{rvm}{list}(x, y, type = "regression", kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 5, var = 0.1, var.fix = FALSE, iterations = 100, verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). Note, that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `rvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{type}{\code{rvm} can only be used for regression at the moment.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. 
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").} \item{alpha}{The initial alpha vector. Can be either a vector of length equal to the number of data points or a single number.} \item{var}{the initial noise variance} \item{var.fix}{Keep noise variance fixed during iterations (default: FALSE)} \item{iterations}{Number of iterations allowed (default: 100)} \item{tol}{tolerance of termination criterion} \item{minmaxdiff}{termination criterion; stop when the maximum difference is equal to this parameter (default: 1e-3) } \item{verbosity}{print information on algorithm convergence (default = FALSE)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: TRUE)} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{The Relevance Vector Machine typically leads to sparser models than the SVM. It also performs better in many cases (especially in regression). } \value{ An S4 object of class "rvm" containing the fitted model.
Accessor functions can be used to access the slots of the object which include : \item{alpha}{The resulting relevance vectors} \item{alphaindex}{ The index of the resulting relevance vectors in the data matrix} \item{nRV}{Number of relevance vectors} \item{RVindex}{The indexes of the relevance vectors} \item{error}{Training error (if \code{fit = TRUE})} ... } \references{ Tipping, M. E.\cr \emph{Sparse Bayesian learning and the relevance vector machine}\cr Journal of Machine Learning Research 1, 211-244\cr \url{http://www.jmlr.org/papers/volume1/tipping01a/tipping01a.pdf} } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm}}} \examples{ # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.05) # train relevance vector machine foo <- rvm(x, y) foo # print relevance vectors alpha(foo) RVindex(foo) # predict and plot ytest <- predict(foo, x) plot(x, y, type ="l") lines(x, ytest, col="red") } \keyword{regression} \keyword{nonlinear} kernlab/man/specc.Rd0000644000175100001440000001420712560414652014056 0ustar hornikusers\name{specc} \alias{specc} \alias{specc,matrix-method} \alias{specc,formula-method} \alias{specc,list-method} \alias{specc,kernelMatrix-method} \alias{show,specc-method} \title{Spectral Clustering} \description{ A spectral clustering algorithm. Clustering is performed by embedding the data into the subspace of the eigenvectors of an affinity matrix. } \usage{ \S4method{specc}{formula}(x, data = NULL, na.action = na.omit, ...) \S4method{specc}{matrix}(x, centers, kernel = "rbfdot", kpar = "automatic", nystrom.red = FALSE, nystrom.sample = dim(x)[1]/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) \S4method{specc}{kernelMatrix}(x, centers, nystrom.red = FALSE, iterations = 200, ...) \S4method{specc}{list}(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), nystrom.red = FALSE, nystrom.sample = length(x)/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) } \arguments{ \item{x}{the matrix of data to be clustered, or a symbolic description of the model to be fit, or a kernel matrix of class \code{kernelMatrix}, or a list of character vectors.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `specc' is called from.} \item{centers}{Either the number of clusters or a set of initial cluster centers. If the first, a random set of rows in the eigenvectors matrix is chosen as the initial centers.} \item{kernel}{the kernel function used in computing the affinity matrix. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{a character string or the list of hyper-parameters (kernel parameters).
The default character string \code{"automatic"} uses a heuristic to determine a suitable value for the width parameter of the RBF kernel. The second option \code{"local"} (local scaling) uses a more advanced heuristic and sets a width parameter for every point in the data set. This is particularly useful when the data incorporates multiple scales. A list can also be used containing the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{nystrom.red}{use nystrom method to calculate eigenvectors. When \code{TRUE} a sample of the dataset is used to calculate the eigenvalues, thus only a \eqn{n x m} matrix where \eqn{n} the sample size is stored in memory (default: \code{FALSE}} \item{nystrom.sample}{number of data points to use for estimating the eigenvalues when using the nystrom method. (default : dim(x)[1]/6)} \item{mod.sample}{proportion of data to use when estimating sigma (default: 0.75)} \item{iterations}{the maximum number of iterations allowed. } \item{na.action}{the action to perform on NA} \item{\dots}{additional parameters} } \details{ Spectral clustering works by embedding the data points of the partitioning problem into the subspace of the \eqn{k} largest eigenvectors of a normalized affinity/kernel matrix. Using a simple clustering method like \code{kmeans} on the embedded points usually leads to good performance. It can be shown that spectral clustering methods boil down to graph partitioning.\cr The data can be passed to the \code{specc} function in a \code{matrix} or a \code{data.frame}, in addition \code{specc} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used.} \value{ An S4 object of class \code{specc} which extends the class \code{vector} containing integers indicating the cluster to which each point is allocated. The following slots contain useful information \item{centers}{A matrix of cluster centers.} \item{size}{The number of point in each cluster} \item{withinss}{The within-cluster sum of squares for each cluster} \item{kernelf}{The kernel function used} } \references{ Andrew Y. Ng, Michael I. Jordan, Yair Weiss\cr \emph{On Spectral Clustering: Analysis and an Algorithm}\cr Neural Information Processing Symposium 2001\cr \url{http://papers.nips.cc/paper/2092-on-spectral-clustering-analysis-and-an-algorithm.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{kkmeans}}, \code{\link{kpca}}, \code{\link{kcca}} } \examples{ ## Cluster the spirals data set. 
data(spirals) sc <- specc(spirals, centers=2) sc centers(sc) size(sc) withinss(sc) plot(spirals, col=sc) } \keyword{cluster} kernlab/man/kpca.Rd0000644000175100001440000001207412560414652013677 0ustar hornikusers\name{kpca} \alias{kpca} \alias{kpca,formula-method} \alias{kpca,matrix-method} \alias{kpca,kernelMatrix-method} \alias{kpca,list-method} \alias{predict,kpca-method} \title{Kernel Principal Components Analysis} \description{ Kernel Principal Components Analysis is a nonlinear form of principal component analysis.} \usage{ \S4method{kpca}{formula}(x, data = NULL, na.action, ...) \S4method{kpca}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, th = 1e-4, na.action = na.omit, ...) \S4method{kpca}{kernelMatrix}(x, features = 0, th = 1e-4, ...) \S4method{kpca}{list}(x, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), features = 0, th = 1e-4, na.action = na.omit, ...) } \arguments{ \item{x}{the data matrix indexed by row or a formula describing the model, or a kernel matrix of class \code{kernelMatrix}, or a list of character vectors} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. (default: 0, all)} \item{th}{the value of the eigenvalue under which principal components are ignored (only valid when features = 0). (default: 0.0001) } \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found.
(NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{Using kernel functions one can efficiently compute principal components in high-dimensional feature spaces, related to input space by some non-linear map.\cr The data can be passed to the \code{kpca} function in a \code{matrix} or a \code{data.frame}; in addition \code{kpca} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used. } \value{ An S4 object containing the principal component vectors along with the corresponding eigenvalues. \item{pcv}{a matrix containing the principal component vectors (column wise)} \item{eig}{The corresponding eigenvalues} \item{rotated}{The original data projected (rotated) on the principal components} \item{xmatrix}{The original data matrix} all the slots of the object can be accessed by accessor functions. } \note{The predict function can be used to embed new data into the new space} \references{ Schoelkopf B., A. Smola, K.-R. Mueller:\cr \emph{Nonlinear component analysis as a kernel eigenvalue problem}\cr Neural Computation 10, 1299-1319\cr \url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.1366} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kcca}}, \code{pca}} \examples{ # another example using the iris data(iris) test <- sample(1:150,20) kpc <- kpca(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2) #print the principal component vectors pcv(kpc) #plot the data projection on the components plot(rotated(kpc),col=as.integer(iris[-test,5]), xlab="1st Principal Component",ylab="2nd Principal Component") #embed remaining points emb <- predict(kpc,iris[test,-5]) points(emb,col=as.integer(iris[test,5])) } \keyword{cluster} kernlab/man/specc-class.Rd0000644000175100001440000000315311304023134015144 0ustar hornikusers\name{specc-class} \docType{class} \alias{specc-class} \alias{centers} \alias{size} \alias{withinss} \alias{centers,specc-method} \alias{withinss,specc-method} \alias{size,specc-method} \alias{kernelf,specc-method} \title{Class "specc"} \description{ The Spectral Clustering Class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("specc", ...)} or by calling the function \code{specc}. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"vector"} containing the cluster assignments} \item{\code{centers}:}{Object of class \code{"matrix"} containing the cluster centers} \item{\code{size}:}{Object of class \code{"vector"} containing the number of points in each cluster} \item{\code{withinss}:}{Object of class \code{"vector"} containing the within-cluster sum of squares for each cluster} \item{\code{kernelf}:}{Object of class \code{kernel} containing the used kernel function.} } } \section{Methods}{ \describe{ \item{centers}{\code{signature(object = "specc")}: returns the cluster centers} \item{withinss}{\code{signature(object = "specc")}: returns the within-cluster sum of squares for each cluster} \item{size}{\code{signature(object = "specc")}: returns the number of points in each cluster } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{specc}}, \code{\link{kpca-class}} } \examples{ ## Cluster the spirals data set.
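## (same spirals example as in ?specc; the accessor functions below
## read the corresponding slots of the fitted "specc" object)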
data(spirals) sc <- specc(spirals, centers=2) centers(sc) size(sc) } \keyword{classes} kernlab/man/inlearn.Rd0000644000175100001440000000600712117362575014414 0ustar hornikusers\name{inlearn} \alias{inlearn} \alias{inlearn,numeric-method} \title{Onlearn object initialization} \description{ Online Kernel Algorithm object \code{onlearn} initialization function. } \usage{ \S4method{inlearn}{numeric}(d, kernel = "rbfdot", kpar = list(sigma = 0.1), type = "novelty", buffersize = 1000) } \arguments{ \item{d}{the dimensionality of the data to be learned} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well.} \item{type}{the type of problem to be learned by the online algorithm: \code{classification}, \code{regression}, \code{novelty}} \item{buffersize}{the size of the buffer to be used} } \details{ The \code{inlearn} function is used to initialize a blank \code{onlearn} object. } \value{ The function returns an \code{S4} object of class \code{onlearn} that can be used by the \code{onlearn} function.
} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{onlearn}}, \code{\link{onlearn-class}} } \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2), type = "classification") ## learn one data point at a time for(i in sample(1:100,100)) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classif} \keyword{neural} \keyword{regression} \keyword{ts} kernlab/man/kernelMatrix.Rd0000644000175100001440000001254111304023134015411 0ustar hornikusers\name{kernelMatrix} \alias{kernelMatrix} \alias{kernelMult} \alias{kernelPol} \alias{kernelFast} \alias{kernelPol,kernel-method} \alias{kernelMatrix,kernel-method} \alias{kernelMult,kernel-method} \alias{kernelFast,kernel-method} \alias{kernelMatrix,rbfkernel-method} \alias{kernelMatrix,polykernel-method} \alias{kernelMatrix,vanillakernel-method} \alias{kernelMatrix,tanhkernel-method} \alias{kernelMatrix,laplacekernel-method} \alias{kernelMatrix,anovakernel-method} \alias{kernelMatrix,splinekernel-method} \alias{kernelMatrix,besselkernel-method} \alias{kernelMatrix,stringkernel-method} \alias{kernelMult,rbfkernel,ANY-method} \alias{kernelMult,splinekernel,ANY-method} \alias{kernelMult,polykernel,ANY-method} \alias{kernelMult,tanhkernel,ANY-method} \alias{kernelMult,laplacekernel,ANY-method} \alias{kernelMult,besselkernel,ANY-method} \alias{kernelMult,anovakernel,ANY-method} \alias{kernelMult,vanillakernel,ANY-method} \alias{kernelMult,character,kernelMatrix-method} \alias{kernelMult,stringkernel,ANY-method} \alias{kernelPol,rbfkernel-method} \alias{kernelPol,splinekernel-method} \alias{kernelPol,polykernel-method} \alias{kernelPol,tanhkernel-method} \alias{kernelPol,vanillakernel-method} \alias{kernelPol,anovakernel-method} \alias{kernelPol,besselkernel-method} \alias{kernelPol,laplacekernel-method} \alias{kernelPol,stringkernel-method} \alias{kernelFast,rbfkernel-method} \alias{kernelFast,splinekernel-method} \alias{kernelFast,polykernel-method} \alias{kernelFast,tanhkernel-method} \alias{kernelFast,vanillakernel-method} \alias{kernelFast,anovakernel-method} \alias{kernelFast,besselkernel-method} \alias{kernelFast,laplacekernel-method} \alias{kernelFast,stringkernel-method} \alias{kernelFast,splinekernel-method} \title{Kernel Matrix functions} \description{ \code{kernelMatrix} calculates the kernel matrix \eqn{K_{ij} = k(x_i,x_j)} or \eqn{K_{ij} = k(x_i,y_j)}.\cr \code{kernelPol} computes the quadratic kernel expression \eqn{H = z_i z_j k(x_i,x_j)}, \eqn{H = z_i k_j k(x_i,y_j)}.\cr \code{kernelMult} calculates the kernel expansion \eqn{f(x_j) = \sum_{i=1}^m z_i k(x_i,x_j)}\cr \code{kernelFast} computes the kernel matrix, identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input, useful in iterative kernel matrix calculations. } \usage{ \S4method{kernelMatrix}{kernel}(kernel, x, y = NULL) \S4method{kernelPol}{kernel}(kernel, x, y = NULL, z, k = NULL) \S4method{kernelMult}{kernel}(kernel, x, y = NULL, z, blocksize = 256) \S4method{kernelFast}{kernel}(kernel, x, y, a) } \arguments{ \item{kernel}{the kernel function to be used to calculate the kernel matrix. This has to be a function of class \code{kernel}, i.e. which can be generated by one of the built-in kernel generating functions (e.g., \code{rbfdot} etc.)
or a user defined function of class \code{kernel} taking two vector arguments and returning a scalar.} \item{x}{a data matrix to be used to calculate the kernel matrix, or a list of vectors when a \code{stringkernel} is used} \item{y}{second data matrix to calculate the kernel matrix, or a list of vectors when a \code{stringkernel} is used} \item{z}{a suitable vector or matrix} \item{k}{a suitable vector or matrix} \item{a}{the squared norm of \code{x}, e.g., \code{rowSums(x^2)}} \item{blocksize}{the kernel expansion computations are done block wise to avoid storing the kernel matrix into memory. \code{blocksize} defines the size of the computational blocks.} } \details{ Common functions used during kernel based computations.\cr The \code{kernel} parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments. \pkg{kernlab} provides the most popular kernel functions which can be initialized by using the following functions: \itemize{ \item \code{rbfdot} Radial Basis kernel function \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} the Spline kernel } (see example.) \code{kernelFast} is mainly used in situations where columns of the kernel matrix are computed per invocation. In these cases, evaluating the norm of each row-entry over and over again would cause significant computational overhead. } \value{ \code{kernelMatrix} returns a symmetric positive semi-definite matrix.\cr \code{kernelPol} returns a matrix.\cr \code{kernelMult} usually returns a one-column matrix. } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{rbfdot}}, \code{\link{polydot}}, \code{\link{tanhdot}}, \code{\link{vanilladot}}} \examples{ ## use the spam data data(spam) dt <- as.matrix(spam[c(10:20,3000:3010),-58]) ## initialize kernel function rbf <- rbfdot(sigma = 0.05) rbf ## calculate kernel matrix kernelMatrix(rbf, dt) yt <- as.matrix(as.integer(spam[c(10:20,3000:3010),58])) yt[yt==2] <- -1 ## calculate the quadratic kernel expression kernelPol(rbf, dt, ,yt) ## calculate the kernel expansion kernelMult(rbf, dt, ,yt) } \keyword{algebra} \keyword{array} kernlab/man/onlearn.Rd0000644000175100001440000000467612560414652014420 0ustar hornikusers\name{onlearn} \alias{onlearn} \alias{onlearn,onlearn-method} \title{Kernel Online Learning algorithms} \description{ Online Kernel-based Learning algorithms for classification, novelty detection, and regression. } \usage{ \S4method{onlearn}{onlearn}(obj, x, y = NULL, nu = 0.2, lambda = 1e-04) } \arguments{ \item{obj}{\code{obj} an object of class \code{onlearn} created by the initialization function \code{inlearn} containing the kernel to be used during learning and the parameters of the learned model} \item{x}{vector or matrix containing the data. Factors have to be numerically coded. If \code{x} is a matrix the code is run internally one sample at a time.} \item{y}{the class label in case of classification. Only binary classification is supported and class labels have to be -1 or +1.
} \item{nu}{a parameter, similar to the \code{nu} parameter in SVMs, that bounds the training error.} \item{lambda}{the learning rate} } \details{ The online algorithms are based on a simple stochastic gradient descent method in feature space. The state of the algorithm is stored in an object of class \code{onlearn} and has to be passed to the function at each iteration. } \value{ The function returns an \code{S4} object of class \code{onlearn} containing the model parameters and the last fitted value which can be retrieved by the accessor method \code{fit}. The value returned in the classification and novelty detection problem is the decision function value phi. The accessor method \code{alpha} returns the model parameters. } \references{ Kivinen J. Smola A.J. Williamson R.C. \cr \emph{Online Learning with Kernels}\cr IEEE Transactions on Signal Processing vol. 52, Issue 8, 2004\cr \url{http://users.cecs.anu.edu.au/~williams/papers/P172.pdf}} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{inlearn}}} \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2), type="classification") ind <- sample(1:100,100) ## learn one data point at a time for(i in ind) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) ## or learn all the data on <- onlearn(on,x[ind,],y[ind],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classif} \keyword{neural} \keyword{regression} \keyword{ts} kernlab/man/kha-class.Rd0000644000175100001440000000450312117362716014626 0ustar hornikusers\name{kha-class} \docType{class} \alias{kha-class} \alias{eig,kha-method} \alias{kcall,kha-method} \alias{kernelf,kha-method} \alias{pcv,kha-method} \alias{xmatrix,kha-method} \alias{eskm,kha-method} \title{Class "kha"} \description{ The Kernel Hebbian Algorithm class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kha", ...)} or by calling the \code{kha} function.
} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding normalization values} \item{\code{eskm}:}{Object of class \code{"vector"} containing the kernel sum} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "kha")}: returns the normalization values } \item{kcall}{\code{signature(object = "kha")}: returns the performed call} \item{kernelf}{\code{signature(object = "kha")}: returns the used kernel function} \item{pcv}{\code{signature(object = "kha")}: returns the principal component vectors } \item{eskm}{\code{signature(object = "kha")}: returns the kernel sum} \item{predict}{\code{signature(object = "kha")}: embeds new data } \item{xmatrix}{\code{signature(object = "kha")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kha}}, \code{\link{ksvm-class}}, \code{\link{kcca-class}} } \examples{ # another example using the iris data(iris) test <- sample(1:50,20) kpc <- kha(~.,data=iris[-test,-5], kernel="rbfdot", kpar=list(sigma=0.2),features=2, eta=0.001, maxiter=65) #print the principal component vectors pcv(kpc) kernelf(kpc) eig(kpc) } \keyword{classes} kernlab/man/predict.gausspr.Rd0000644000175100001440000000416612117365151016076 0ustar hornikusers\name{predict.gausspr} \alias{predict.gausspr} \alias{predict,gausspr-method} \title{predict method for Gaussian Processes object} \description{Prediction of test data using Gaussian Processes} \usage{ \S4method{predict}{gausspr}(object, newdata, type = "response", coupler = "minpair") } \arguments{ \item{object}{an S4 object of class \code{gausspr} created by the \code{gausspr} function} \item{newdata}{a data frame or matrix containing new data} \item{type}{one of \code{response}, \code{probabilities} indicating the type of output: predicted values or matrix of class probabilities} \item{coupler}{Coupling method used in the multiclass case, can be one of \code{minpair} or \code{pkpd} (see reference for more details).} } \value{ \item{response}{predicted classes (the classes with majority vote) or the response value in regression.} \item{probabilities}{matrix of class probabilities (one column for each class and one row for each input).} } \references{ \itemize{ \item C. K. I. Williams and D. Barber \cr Bayesian classification with Gaussian processes. \cr IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(12):1342-1351, 1998\cr \url{http://www.dai.ed.ac.uk/homes/ckiw/postscript/pami_final.ps.gz} \item T.F. Wu, C.J. Lin, R.C. Weng. 
\cr \emph{Probability estimates for Multi-class Classification by Pairwise Coupling}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \keyword{classif} \examples{ ## example using the promotergene data set data(promotergene) ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a Gaussian process classifier gene <- gausspr(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma=0.015)) gene ## predict gene type probabilities on the test set genetype <- predict(gene,genetest,type="probabilities") genetype } kernlab/man/kmmd.Rd0000644000175100001440000001223512560414652013710 0ustar hornikusers\name{kmmd} \alias{kmmd} \alias{kmmd,matrix-method} \alias{kmmd,list-method} \alias{kmmd,kernelMatrix-method} \alias{show,kmmd-method} \alias{H0} \alias{Asymbound} \alias{Radbound} \alias{mmdstats} \alias{AsympH0} \title{Kernel Maximum Mean Discrepancy.} \description{The Kernel Maximum Mean Discrepancy \code{kmmd} performs a non-parametric distribution test.} \usage{ \S4method{kmmd}{matrix}(x, y, kernel="rbfdot",kpar="automatic", alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) \S4method{kmmd}{kernelMatrix}(x, y, Kxy, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...) \S4method{kmmd}{list}(x, y, kernel="stringdot", kpar = list(type = "spectrum", length = 4), alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) } \arguments{ \item{x}{data values, in a \code{matrix}, \code{list}, or \code{kernelMatrix}} \item{y}{data values, in a \code{matrix}, \code{list}, or \code{kernelMatrix}} \item{Kxy}{\code{kernelMatrix} between \eqn{x} and \eqn{y} values (only for the kernelMatrix interface)} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. \code{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot".
\item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in 'sigest' to calculate a good 'sigma' value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic"). } \item{alpha}{the confidence level of the test (default: 0.05)} \item{asymptotic}{calculate the bounds asymptotically (suitable for smaller datasets) (default: FALSE)} \item{replace}{sample with replacement when computing the asymptotic bounds (default: TRUE)} \item{ntimes}{number of times repeating the sampling procedure (default: 150)} \item{frac}{fraction of points to sample (default: 1) } \item{\dots}{additional parameters.} } \details{\code{kmmd} calculates the kernel maximum mean discrepancy for samples from two distributions and conducts a test as to whether the samples are from different distributions with level \code{alpha}. } \value{ An S4 object of class \code{kmmd} containing the results of whether the H0 hypothesis is rejected or not. H0 being that the samples \eqn{x} and \eqn{y} come from the same distribution. The object contains the following slots: \item{\code{H0}}{is H0 rejected (logical)} \item{\code{AsympH0}}{is H0 rejected according to the asymptotic bound (logical)} \item{\code{kernelf}}{the kernel function used.} \item{\code{mmdstats}}{the test statistics (vector of two)} \item{\code{Radbound}}{the Rademacher bound} \item{\code{Asymbound}}{the asymptotic bound} see \code{kmmd-class} for more details. } \references{Gretton, A., K. Borgwardt, M. Rasch, B. Schoelkopf and A. Smola\cr \emph{A Kernel Method for the Two-Sample-Problem}\cr Neural Information Processing Systems 2006, Vancouver \cr \url{http://papers.nips.cc/paper/3110-a-kernel-method-for-the-two-sample-problem.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{ksvm}} \examples{ # create data x <- matrix(runif(300),100) y <- matrix(runif(300)+1,100) mmdo <- kmmd(x, y) mmdo } \keyword{htest} \keyword{nonlinear} \keyword{nonparametric} kernlab/man/income.Rd0000644000175100001440000000370611304023134014221 0ustar hornikusers\name{income} \alias{income} \title{Income Data} \description{ Customer Income Data from a marketing survey. } \usage{data(income)} \format{ A data frame with 14 categorical variables (8993 observations).
Explanation of the variable names: \tabular{rllll}{ \tab 1 \tab \code{INCOME} \tab annual income of household \tab \cr \tab \tab \tab (Personal income if single) \tab ordinal\cr \tab 2 \tab \code{SEX} \tab sex \tab nominal\cr \tab 3 \tab \code{MARITAL.STATUS} \tab marital status \tab nominal\cr \tab 4 \tab \code{AGE} \tab age \tab ordinal\cr \tab 5 \tab \code{EDUCATION} \tab educational grade \tab ordinal\cr \tab 6 \tab \code{OCCUPATION} \tab type of work \tab nominal \cr \tab 7 \tab \code{AREA} \tab how long the interviewed person has lived\tab \cr \tab \tab \tab in the San Francisco/Oakland/San Jose area \tab ordinal\cr \tab 8 \tab \code{DUAL.INCOMES} \tab dual incomes (if married) \tab nominal\cr \tab 9 \tab \code{HOUSEHOLD.SIZE} \tab persons living in the household \tab ordinal\cr \tab 10 \tab \code{UNDER18} \tab persons in household under 18 \tab ordinal\cr \tab 11 \tab \code{HOUSEHOLDER} \tab householder status \tab nominal\cr \tab 12 \tab \code{HOME.TYPE} \tab type of home \tab nominal\cr \tab 13 \tab \code{ETHNIC.CLASS} \tab ethnic classification \tab nominal\cr \tab 14 \tab \code{LANGUAGE} \tab language most often spoken at home \tab nominal\cr } } \details{ A total of N=9409 questionnaires containing 502 questions were filled out by shopping mall customers in the San Francisco Bay area. The dataset is an extract from this survey. It consists of 14 demographic attributes. The dataset is a mixture of nominal and ordinal variables with a lot of missing data. The goal is to predict the Annual Income of Household from the other 13 demographic attributes. } \source{ Impact Resources, Inc., Columbus, OH (1987). } \keyword{datasets} kernlab/man/sigest.Rd0000644000175100001440000000631712117366220014255 0ustar hornikusers\name{sigest} \alias{sigest} \alias{sigest,formula-method} \alias{sigest,matrix-method} \title{Hyperparameter estimation for the Gaussian Radial Basis kernel} \description{ Estimates a range of values for the "sigma" inverse width parameter in the Gaussian Radial Basis kernel, for use with Support Vector Machines. The estimation is based on the data to be used. } \usage{ \S4method{sigest}{formula}(x, data=NULL, frac = 0.5, na.action = na.omit, scaled = TRUE) \S4method{sigest}{matrix}(x, frac = 0.5, scaled = TRUE, na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model upon which the estimation is based. When not using a formula x is a matrix or vector containing the data} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `sigest' is called from.} \item{frac}{Fraction of data to use for estimation. By default half of the data is used to estimate the range of the sigma hyperparameter.} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally to zero mean and unit variance (since this is the default action in \code{ksvm} as well). The center and scale values are returned and used for later predictions. } \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found.
(NOTE: If given, this argument must be named.)} } \details{ \code{sigest} estimates the range of values for the sigma parameter which would return good results when used with a Support Vector Machine (\code{ksvm}). The estimation is based upon the 0.1 and 0.9 quantile of \eqn{\|x - x'\|^2}. Any value in between those two bounds will typically produce good results. } \value{ Returns a vector of length 3 defining the range (0.1 quantile, median and 0.9 quantile) of the sigma hyperparameter. } \references{ B. Caputo, K. Sim, F. Furesjo, A. Smola, \cr \emph{Appearance-based object recognition using SVMs: which kernel should I use?}\cr Proc of NIPS workshop on Statistical methods for computational experiments in visual processing and computer vision, Whistler, 2002. } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{ksvm}}} \examples{ ## estimate good sigma values for promotergene data(promotergene) srange <- sigest(Class~.,data = promotergene) srange s <- srange[2] s ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma = s),C=50,cross=3) gene ## predict gene type on the test set promoter <- predict(gene,genetest[,-1]) ## Check results table(promoter,genetest[,1]) } \keyword{classif} \keyword{regression} kernlab/man/ipop-class.Rd0000644000175100001440000000313111304023134015013 0ustar hornikusers\name{ipop-class} \docType{class} \alias{ipop-class} \alias{primal,ipop-method} \alias{dual,ipop-method} \alias{how,ipop-method} \alias{primal} \alias{dual} \alias{how} \title{Class "ipop"} \description{The quadratic problem solver class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("ipop", ...)} or by calling the \code{ipop} function. } \section{Slots}{ \describe{ \item{\code{primal}:}{Object of class \code{"vector"} the primal solution of the problem} \item{\code{dual}:}{Object of class \code{"numeric"} the dual of the problem} \item{\code{how}:}{Object of class \code{"character"} convergence information} } } \section{Methods}{ \describe{ \item{primal}{Object of class \code{ipop}}{Return the primal of the problem} \item{dual}{Object of class \code{ipop}}{Return the dual of the problem} \item{how}{Object of class \code{ipop}}{Return information on convergence} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ipop}} } \examples{ ## solve the Support Vector Machine optimization problem data(spam) ## sample a scaled part (300 points) of the spam data set m <- 300 set <- sample(1:dim(spam)[1],m) x <- scale(as.matrix(spam[,-58]))[set,] y <- as.integer(spam[set,58]) y[y==2] <- -1 ## set C parameter and kernel C <- 5 rbf <- rbfdot(sigma = 0.1) ## create H matrix etc. H <- kernelPol(rbf,x,,y) c <- matrix(rep(-1,m)) A <- t(y) b <- 0 l <- matrix(rep(0,m)) u <- matrix(rep(C,m)) r <- 0 sv <- ipop(c,H,A,b,l,u,r) primal(sv) dual(sv) how(sv) } \keyword{classes} kernlab/man/ticdata.Rd0000644000175100001440000002013411304023134014352 0ustar hornikusers\name{ticdata} \alias{ticdata} \title{The Insurance Company Data} \description{ This data set used in the CoIL 2000 Challenge contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes.
The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why? } \usage{data(ticdata)} \format{ ticdata: Dataset to train and validate prediction models and build a description (9822 customer records). Each record consists of 86 attributes, containing sociodemographic data (attributes 1-43) and product ownership (attributes 44-86). The sociodemographic data is derived from zip codes. All customers living in areas with the same zip code have the same sociodemographic attributes. Attribute 86, \code{CARAVAN:Number of mobile home policies}, is the target variable. Data Format \tabular{rlll}{ \tab 1 \tab \code{STYPE} \tab Customer Subtype\cr \tab 2 \tab \code{MAANTHUI} \tab Number of houses 1 - 10\cr \tab 3 \tab \code{MGEMOMV} \tab Avg size household 1 - 6\cr \tab 4 \tab \code{MGEMLEEF} \tab Average age\cr \tab 5 \tab \code{MOSHOOFD} \tab Customer main type\cr \tab 6 \tab \code{MGODRK} \tab Roman catholic \cr \tab 7 \tab \code{MGODPR} \tab Protestant ... \cr \tab 8 \tab \code{MGODOV} \tab Other religion \cr \tab 9 \tab \code{MGODGE} \tab No religion \cr \tab 10 \tab \code{MRELGE} \tab Married \cr \tab 11 \tab \code{MRELSA} \tab Living together \cr \tab 12 \tab \code{MRELOV} \tab Other relation \cr \tab 13 \tab \code{MFALLEEN} \tab Singles \cr \tab 14 \tab \code{MFGEKIND} \tab Household without children \cr \tab 15 \tab \code{MFWEKIND} \tab Household with children \cr \tab 16 \tab \code{MOPLHOOG} \tab High level education \cr \tab 17 \tab \code{MOPLMIDD} \tab Medium level education \cr \tab 18 \tab \code{MOPLLAAG} \tab Lower level education \cr \tab 19 \tab \code{MBERHOOG} \tab High status \cr \tab 20 \tab \code{MBERZELF} \tab Entrepreneur \cr \tab 21 \tab \code{MBERBOER} \tab Farmer \cr \tab 22 \tab \code{MBERMIDD} \tab Middle management \cr \tab 23 \tab \code{MBERARBG} \tab Skilled labourers \cr \tab 24 \tab \code{MBERARBO} \tab Unskilled labourers \cr \tab 25 \tab \code{MSKA} \tab Social class A \cr \tab 26 \tab \code{MSKB1} \tab Social class B1 \cr \tab 27 \tab \code{MSKB2} \tab Social class B2 \cr \tab 28 \tab \code{MSKC} \tab Social class C \cr \tab 29 \tab \code{MSKD} \tab Social class D \cr \tab 30 \tab \code{MHHUUR} \tab Rented house \cr \tab 31 \tab \code{MHKOOP} \tab Home owners \cr \tab 32 \tab \code{MAUT1} \tab 1 car \cr \tab 33 \tab \code{MAUT2} \tab 2 cars \cr \tab 34 \tab \code{MAUT0} \tab No car \cr \tab 35 \tab \code{MZFONDS} \tab National Health Service \cr \tab 36 \tab \code{MZPART} \tab Private health insurance \cr \tab 37 \tab \code{MINKM30} \tab Income <30.000 \cr \tab 38 \tab \code{MINK3045} \tab Income 30-45.000 \cr \tab 39 \tab \code{MINK4575} \tab Income 45-75.000 \cr \tab 40 \tab \code{MINK7512} \tab Income 75-122.000 \cr \tab 41 \tab \code{MINK123M} \tab Income >123.000 \cr \tab 42 \tab \code{MINKGEM} \tab Average income \cr \tab 43 \tab \code{MKOOPKLA} \tab Purchasing power class \cr \tab 44 \tab \code{PWAPART} \tab Contribution private third party insurance \cr \tab 45 \tab \code{PWABEDR} \tab Contribution third party insurance (firms) \cr \tab 46 \tab \code{PWALAND} \tab Contribution third party insurance (agriculture) \cr \tab 47 \tab \code{PPERSAUT} \tab Contribution car policies \cr \tab 48 \tab \code{PBESAUT} \tab Contribution delivery van policies \cr \tab 49 \tab \code{PMOTSCO} \tab Contribution motorcycle/scooter policies \cr \tab 50 \tab \code{PVRAAUT} \tab Contribution lorry policies \cr \tab 51 \tab \code{PAANHANG} \tab Contribution trailer policies \cr
\tab 52 \tab \code{PTRACTOR} \tab Contribution tractor policies \cr \tab 53 \tab \code{PWERKT} \tab Contribution agricultural machines policies \cr \tab 54 \tab \code{PBROM} \tab Contribution moped policies \cr \tab 55 \tab \code{PLEVEN} \tab Contribution life insurances \cr \tab 56 \tab \code{PPERSONG} \tab Contribution private accident insurance policies \cr \tab 57 \tab \code{PGEZONG} \tab Contribution family accidents insurance policies \cr \tab 58 \tab \code{PWAOREG} \tab Contribution disability insurance policies \cr \tab 59 \tab \code{PBRAND} \tab Contribution fire policies \cr \tab 60 \tab \code{PZEILPL} \tab Contribution surfboard policies \cr \tab 61 \tab \code{PPLEZIER} \tab Contribution boat policies \cr \tab 62 \tab \code{PFIETS} \tab Contribution bicycle policies \cr \tab 63 \tab \code{PINBOED} \tab Contribution property insurance policies \cr \tab 64 \tab \code{PBYSTAND} \tab Contribution social security insurance policies \cr \tab 65 \tab \code{AWAPART} \tab Number of private third party insurance 1 - 12 \cr \tab 66 \tab \code{AWABEDR} \tab Number of third party insurance (firms) ... \cr \tab 67 \tab \code{AWALAND} \tab Number of third party insurance (agriculture) \cr \tab 68 \tab \code{APERSAUT} \tab Number of car policies \cr \tab 69 \tab \code{ABESAUT} \tab Number of delivery van policies \cr \tab 70 \tab \code{AMOTSCO} \tab Number of motorcycle/scooter policies \cr \tab 71 \tab \code{AVRAAUT} \tab Number of lorry policies \cr \tab 72 \tab \code{AAANHANG} \tab Number of trailer policies \cr \tab 73 \tab \code{ATRACTOR} \tab Number of tractor policies \cr \tab 74 \tab \code{AWERKT} \tab Number of agricultural machines policies \cr \tab 75 \tab \code{ABROM} \tab Number of moped policies \cr \tab 76 \tab \code{ALEVEN} \tab Number of life insurances \cr \tab 77 \tab \code{APERSONG} \tab Number of private accident insurance policies \cr \tab 78 \tab \code{AGEZONG} \tab Number of family accidents insurance policies \cr \tab 79 \tab \code{AWAOREG} \tab Number of disability insurance policies \cr \tab 80 \tab \code{ABRAND} \tab Number of fire policies \cr \tab 81 \tab \code{AZEILPL} \tab Number of surfboard policies \cr \tab 82 \tab \code{APLEZIER} \tab Number of boat policies \cr \tab 83 \tab \code{AFIETS} \tab Number of bicycle policies \cr \tab 84 \tab \code{AINBOED} \tab Number of property insurance policies \cr \tab 85 \tab \code{ABYSTAND} \tab Number of social security insurance policies \cr \tab 86 \tab \code{CARAVAN} \tab Number of mobile home policies 0 - 1 \cr } Note: All the variables starting with M are zipcode variables. They give information on the distribution of that variable, e.g., Rented house, in the zipcode area of the customer. } \details{ Information about the insurance company customers consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was supplied by the Dutch data mining company Sentient Machine Research and is based on a real world business problem. The training set contains over 5000 descriptions of customers, including the information of whether or not they have a caravan insurance policy. The test set contains 4000 customers. The training and test sets are merged in the ticdata set. More information about the data set and the CoIL 2000 Challenge along with publications based on the data set can be found at \url{http://www.liacs.nl/~putten/library/cc2000/}.
} \source{ \itemize{ \item UCI KDD Archive: \url{http://kdd.ics.uci.edu} \item Donor: Sentient Machine Research \cr Peter van der Putten \cr Sentient Machine Research \cr Baarsjesweg 224 \cr 1058 AA Amsterdam \cr The Netherlands \cr +31 20 6186927 \cr pvdputten@hotmail.com, putten@liacs.nl } } \references{Peter van der Putten, Michel de Ruiter, Maarten van Someren \emph{CoIL Challenge 2000 Tasks and Results: Predicting and Explaining Caravan Policy Ownership}\cr \url{http://www.liacs.nl/~putten/library/cc2000/}} \keyword{datasets} kernlab/man/as.kernelMatrix.Rd0000644000175100001440000000230411304023134016015 0ustar hornikusers\name{as.kernelMatrix} \docType{methods} \alias{kernelMatrix-class} \alias{as.kernelMatrix} \alias{as.kernelMatrix-methods} \alias{as.kernelMatrix,matrix-method} \title{Assign kernelMatrix class to matrix objects} \description{\code{as.kernelMatrix} in package \pkg{kernlab} can be used to coerce matrix objects representing a kernel matrix to the \code{kernelMatrix} class. These matrices can then be used with the kernelMatrix interfaces which most of the functions in \pkg{kernlab} support.} \usage{ \S4method{as.kernelMatrix}{matrix}(x, center = FALSE) } \arguments{ \item{x}{matrix to be assigned the \code{kernelMatrix} class } \item{center}{center the kernel matrix in feature space (default: FALSE) } } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{kernelMatrix}}, \code{\link{dots}}} \keyword{methods} \examples{ ## Create toy data x <- rbind(matrix(rnorm(10),,2),matrix(rnorm(10,mean=3),,2)) y <- matrix(c(rep(1,5),rep(-1,5))) ### Use as.kernelMatrix to label the cov. matrix as a kernel matrix ### which is eq. to using a linear kernel
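### (crossprod(t(x)) computes x %*% t(x), i.e. the matrix of pairwise inner
### products between the rows of x -- the Gram matrix of the linear kernel)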
K <- as.kernelMatrix(crossprod(t(x))) K svp2 <- ksvm(K, y, type="C-svc") svp2 } kernlab/man/gausspr-class.Rd0000644000175100001440000001041212055335061015545 0ustar hornikusers\name{gausspr-class} \docType{class} \alias{gausspr-class} \alias{alpha,gausspr-method} \alias{cross,gausspr-method} \alias{error,gausspr-method} \alias{kcall,gausspr-method} \alias{kernelf,gausspr-method} \alias{kpar,gausspr-method} \alias{lev,gausspr-method} \alias{type,gausspr-method} \alias{alphaindex,gausspr-method} \alias{xmatrix,gausspr-method} \alias{ymatrix,gausspr-method} \alias{scaling,gausspr-method} \title{Class "gausspr"} \description{The Gaussian Processes object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("gausspr", ...)} or by calling the \code{gausspr} function } \section{Slots}{ \describe{ \item{\code{tol}:}{Object of class \code{"numeric"} contains tolerance of termination criteria} \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{kcall}:}{Object of class \code{"list"} contains the used function call } \item{\code{type}:}{Object of class \code{"character"} contains type of problem } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{lev}:}{Object of class \code{"vector"} containing the levels of the response (in case of classification) } \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in case of classification) } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computed alpha values } \item{\code{alphaindex}:}{Object of class \code{"list"} containing the indexes for the alphas in various classes (in multi-class problems).} \item{\code{sol}:}{Object of class \code{"matrix"} containing the solution to the Gaussian Process formulation; it is used to compute the variance in regression problems.} \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling coefficients of the data (when \code{scaled = TRUE} is used).} \item{\code{nvar}:}{Object of class \code{"numeric"} containing the computed variance} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "gausspr")}: returns the alpha vector} \item{cross}{\code{signature(object = "gausspr")}: returns the cross validation error } \item{error}{\code{signature(object = "gausspr")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "gausspr")}: returns the call performed} \item{kernelf}{\code{signature(object = "gausspr")}: returns the kernel function used} \item{kpar}{\code{signature(object = "gausspr")}: returns the kernel parameter used} \item{lev}{\code{signature(object = "gausspr")}: returns the response levels (in classification) }
\item{type}{\code{signature(object = "gausspr")}: returns the type of problem} \item{xmatrix}{\code{signature(object = "gausspr")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "gausspr")}: returns the response matrix used} \item{scaling}{\code{signature(object = "gausspr")}: returns the scaling coefficients of the data (when \code{scaled = TRUE} is used)} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{gausspr}}, \code{\link{ksvm-class}}, \code{\link{vm-class}} } \examples{ # train model data(iris) test <- gausspr(Species~.,data=iris,var=2) test alpha(test) error(test) lev(test) } \keyword{classes} kernlab/man/kernel-class.Rd0000644000175100001440000000422311304023134015333 0ustar hornikusers\name{kernel-class} \docType{class} \alias{rbfkernel-class} \alias{polykernel-class} \alias{vanillakernel-class} \alias{tanhkernel-class} \alias{anovakernel-class} \alias{besselkernel-class} \alias{laplacekernel-class} \alias{splinekernel-class} \alias{stringkernel-class} \alias{fourierkernel-class} \alias{kfunction-class} \alias{kernel-class} \alias{kpar,kernel-method} \title{Class "kernel" "rbfkernel" "polykernel", "tanhkernel", "vanillakernel"} \description{ The built-in kernel classes in \pkg{kernlab}} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("rbfkernel")}, \code{new("polykernel")}, \code{new("tanhkernel")}, \code{new("vanillakernel")}, \code{new("anovakernel")}, \code{new("besselkernel")}, \code{new("laplacekernel")}, \code{new("splinekernel")}, \code{new("stringkernel")} or by calling the \code{rbfdot}, \code{polydot}, \code{tanhdot}, \code{vanilladot}, \code{anovadot}, \code{besseldot}, \code{laplacedot}, \code{splinedot}, \code{stringdot} functions, etc. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"function"} containing the kernel function } \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters } } } \section{Extends}{ Class \code{"kernel"}, directly. Class \code{"function"}, by class \code{"kernel"}. } \section{Methods}{ \describe{ \item{kernelMatrix}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the kernel matrix} \item{kernelMult}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the kernel expansion} \item{kernelPol}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the quadratic kernel expression} \item{kernelFast}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes parts of or the full kernel matrix, mainly used in kernel algorithms where columns of the kernel matrix are computed per invocation } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{dots}} } \examples{ rbfkernel <- rbfdot(sigma = 0.1) rbfkernel is(rbfkernel) kpar(rbfkernel) } \keyword{classes} kernlab/man/dots.Rd0000644000175100001440000001005711304023134013723 0ustar hornikusers\name{dots} \alias{dots} \alias{kernels} \alias{rbfdot} \alias{polydot} \alias{tanhdot} \alias{vanilladot} \alias{laplacedot} \alias{besseldot} \alias{anovadot} \alias{fourierdot} \alias{splinedot} \alias{kpar} \alias{kfunction} \alias{show,kernel-method} \title{Kernel Functions} \description{ The kernel generating functions provided in kernlab.
\cr The Gaussian RBF kernel \eqn{k(x,x') = \exp(-\sigma \|x - x'\|^2)} \cr The Polynomial kernel \eqn{k(x,x') = (scale <x, x'> + offset)^{degree}}\cr The Linear kernel \eqn{k(x,x') = <x, x'>}\cr The Hyperbolic tangent kernel \eqn{k(x, x') = \tanh(scale <x, x'> + offset)}\cr The Laplacian kernel \eqn{k(x,x') = \exp(-\sigma \|x - x'\|)} \cr The Bessel kernel \eqn{k(x,x') = (- Bessel_{(\nu+1)}^n \sigma \|x - x'\|^2)} \cr The ANOVA RBF kernel \eqn{k(x,x') = \sum_{1\leq i_1 \ldots < i_D \leq N} \prod_{d=1}^D k(x_{id}, {x'}_{id})} where k(x,x) is a Gaussian RBF kernel. \cr The Spline kernel \eqn{ \prod_{d=1}^D 1 + x_i x_j + x_i x_j min(x_i, x_j) - \frac{x_i + x_j}{2} min(x_i,x_j)^2 + \frac{min(x_i,x_j)^3}{3}} \cr The String kernels (see \code{stringdot}). } \usage{ rbfdot(sigma = 1) polydot(degree = 1, scale = 1, offset = 1) tanhdot(scale = 1, offset = 1) vanilladot() laplacedot(sigma = 1) besseldot(sigma = 1, order = 1, degree = 1) anovadot(sigma = 1, degree = 1) splinedot() } \arguments{ \item{sigma}{The inverse kernel width used by the Gaussian, the Laplacian, the Bessel and the ANOVA kernel } \item{degree}{The degree of the polynomial, Bessel or ANOVA kernel function. This has to be a positive integer.} \item{scale}{The scaling parameter of the polynomial and tangent kernel is a convenient way of normalizing patterns without the need to modify the data itself} \item{offset}{The offset used in a polynomial or hyperbolic tangent kernel} \item{order}{The order of the Bessel function to be used as a kernel} } \details{ The kernel generating functions are used to initialize a kernel function which calculates the dot (inner) product between two feature vectors in a Hilbert Space. These functions can be passed as a \code{kernel} argument on almost all functions in \pkg{kernlab} (e.g., \code{ksvm}, \code{kpca} etc). Although using one of the existing kernel functions as a \code{kernel} argument in various functions in \pkg{kernlab} has the advantage that optimized code is used to calculate various kernel expressions, any other function implementing a dot product of class \code{kernel} can also be used as a kernel argument. This allows the user to use, test and develop special kernels for a given data set or algorithm. For details on the string kernels see \code{stringdot}. } \value{ Returns an S4 object of class \code{kernel} which extends the \code{function} class. The resulting function implements the given kernel calculating the inner (dot) product between two vectors. \item{kpar}{a list containing the kernel parameters (hyperparameters) used.} The kernel parameters can be accessed by the \code{kpar} function. } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \note{If the offset in the Polynomial kernel is set to 0, we obtain homogeneous polynomial kernels; for positive values, we have inhomogeneous kernels. Note that for negative values the kernel does not satisfy Mercer's condition and thus the optimizers may fail. \cr In the Hyperbolic tangent kernel, if the offset is negative the likelihood of obtaining a kernel matrix that is not positive definite is much higher (since then even some diagonal elements may be negative), hence if this kernel has to be used, the offset should always be positive. Note, however, that this is no guarantee that the kernel will be positive.
} \seealso{\code{stringdot}, \code{\link{kernelMatrix}}, \code{\link{kernelMult}}, \code{\link{kernelPol}}} \examples{ rbfkernel <- rbfdot(sigma = 0.1) rbfkernel kpar(rbfkernel) ## create two vectors x <- rnorm(10) y <- rnorm(10) ## calculate dot product rbfkernel(x,y) } \keyword{symbolmath} kernlab/man/spam.Rd0000644000175100001440000000412711304023134013713 0ustar hornikusers\name{spam} \alias{spam} \title{Spam E-mail Database} \description{A data set collected at Hewlett-Packard Labs, that classifies 4601 e-mails as spam or non-spam. In addition to this class label there are 57 variables indicating the frequency of certain words and characters in the e-mail.} \usage{data(spam)} \format{A data frame with 4601 observations and 58 variables. The first 48 variables contain the frequency of the variable name (e.g., business) in the e-mail. If the variable name starts with num (e.g., num650) then it indicates the frequency of the corresponding number (e.g., 650). The variables 49-54 indicate the frequency of the characters `;', `(', `[', `!', `\$', and `\#'. The variables 55-57 contain the average, longest and total run-length of capital letters. Variable 58 indicates the type of the mail and is either \code{"nonspam"} or \code{"spam"}, i.e. unsolicited commercial e-mail.} \details{ The data set contains 2788 e-mails classified as \code{"nonspam"} and 1813 classified as \code{"spam"}. The ``spam'' concept is diverse: advertisements for products/web sites, make money fast schemes, chain letters, pornography... This collection of spam e-mails came from the collectors' postmaster and individuals who had filed spam. The collection of non-spam e-mails came from filed work and personal e-mails, and hence the word 'george' and the area code '650' are indicators of non-spam. These are useful when constructing a personalized spam filter. One would either have to blind such non-spam indicators or get a very wide collection of non-spam to generate a general purpose spam filter. } \source{ \itemize{ \item Creators: Mark Hopkins, Erik Reeber, George Forman, Jaap Suermondt at Hewlett-Packard Labs, 1501 Page Mill Rd., Palo Alto, CA 94304 \item Donor: George Forman (gforman at nospam hpl.hp.com) 650-857-7835 } These data have been taken from the UCI Repository Of Machine Learning Databases at \url{http://www.ics.uci.edu/~mlearn/MLRepository.html}} \references{ T. Hastie, R. Tibshirani, J.H. Friedman. \emph{The Elements of Statistical Learning.} Springer, 2001. } \keyword{datasets} kernlab/man/inchol.Rd0000644000175100001440000001025011304023134014221 0ustar hornikusers\name{inchol} \alias{inchol} \alias{inchol,matrix-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{Incomplete Cholesky decomposition} \description{ \code{inchol} computes the incomplete Cholesky decomposition of the kernel matrix from a data matrix. } \usage{ inchol(x, kernel="rbfdot", kpar=list(sigma=0.1), tol = 0.001, maxiter = dim(x)[1], blocksize = 50, verbose = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{The data matrix indexed by row} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class \code{kernel}, which computes the inner product in feature space between two vector arguments.
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
    }
    The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument.
  }
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
    }
    Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.
  }
  \item{tol}{algorithm stops when remaining pivots bring less accuracy than \code{tol} (default: 0.001)}
  \item{maxiter}{maximum number of iterations and columns in \eqn{Z}}
  \item{blocksize}{add this many columns to matrix per iteration}
  \item{verbose}{print info on algorithm convergence}
}
\details{An incomplete Cholesky decomposition calculates \eqn{Z}, where \eqn{K = ZZ'}, \eqn{K} being the kernel matrix. Since the rank of a kernel matrix is usually low, \eqn{Z} tends to be smaller than the complete kernel matrix. The decomposed matrix can be used to create memory-efficient kernel-based algorithms without the need to compute and store a complete kernel matrix in memory.}
\value{
An S4 object of class "inchol" which is an extension of the class "matrix". The object is the decomposed kernel matrix along with the slots:
\item{pivots}{Indices on which pivots were done}
\item{diagresidues}{Residuals left on the diagonal}
\item{maxresiduals}{Residuals picked for pivoting}
slots can be accessed either by \code{object@slot} or by accessor functions with the same name (e.g., \code{pivots(object)})}
\references{
Francis R. Bach, Michael I. Jordan\cr
\emph{Kernel Independent Component Analysis}\cr
Journal of Machine Learning Research 3, 1-48\cr
\url{http://www.jmlr.org/papers/volume3/bach02a/bach02a.pdf}
}
\author{Alexandros Karatzoglou (based on Matlab code by S.V.N.
(Vishy) Vishwanathan and Alex Smola)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{csi}}, \code{\link{inchol-class}}, \code{\link{chol}}}
\examples{
data(iris)
datamatrix <- as.matrix(iris[,-5])
# initialize kernel function
rbf <- rbfdot(sigma=0.1)
rbf
Z <- inchol(datamatrix,kernel=rbf)
dim(Z)
pivots(Z)
# calculate kernel matrix
K <- crossprod(t(Z))
# difference between approximated and real kernel matrix
(K - kernelMatrix(kernel=rbf, datamatrix))[6,]
}
\keyword{methods}
\keyword{algebra}
\keyword{array}
kernlab/man/lssvm.Rd0000644000175100001440000002010212117365064014116 0ustar hornikusers\name{lssvm}
\docType{methods}
\alias{lssvm}
\alias{lssvm-methods}
\alias{lssvm,formula-method}
\alias{lssvm,vector-method}
\alias{lssvm,matrix-method}
\alias{lssvm,list-method}
\alias{lssvm,kernelMatrix-method}
\alias{show,lssvm-method}
\alias{coef,lssvm-method}
\alias{predict,lssvm-method}
\title{Least Squares Support Vector Machine}
\description{
The \code{lssvm} function is an implementation of the Least Squares SVM. \code{lssvm} includes a reduced version of Least Squares SVM using a decomposition of the kernel matrix which is calculated by the \code{csi} function.
}
\usage{
\S4method{lssvm}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE)

\S4method{lssvm}{vector}(x, ...)

\S4method{lssvm}{matrix}(x, y, scaled = TRUE, kernel = "rbfdot", kpar = "automatic",
      type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001,
      rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE,
      ..., subset, na.action = na.omit)

\S4method{lssvm}{kernelMatrix}(x, y, type = NULL, tau = 0.01,
      tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0,
      fit = TRUE, ...)

\S4method{lssvm}{list}(x, y, scaled = TRUE, kernel = "stringdot",
      kpar = list(length=4, lambda = 0.5), type = NULL, tau = 0.01,
      reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3),
      delta = 40, cross = 0, fit = TRUE, ..., subset)
}
\arguments{
  \item{x}{a symbolic description of the model to be fit, a matrix or vector containing the training data when a formula interface is not used, or a \code{kernelMatrix} or a list of character vectors.}
  \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `lssvm' is called from.}
  \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression; currently not supported).}
  \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally to zero mean and unit variance. The center and scale values are returned and used for later predictions.}
  \item{type}{Type of problem. Either "classification" or "regression". Depending on whether \code{y} is a factor or not, the default setting for \code{type} is "classification" or "regression" respectively, but can be overwritten by setting an explicit value. (regression is currently not supported)\cr}
  \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments.
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel "Gaussian"
      \item \code{polydot} Polynomial kernel
      \item \code{vanilladot} Linear kernel
      \item \code{tanhdot} Hyperbolic tangent kernel
      \item \code{laplacedot} Laplacian kernel
      \item \code{besseldot} Bessel kernel
      \item \code{anovadot} ANOVA RBF kernel
      \item \code{splinedot} Spline kernel
      \item \code{stringdot} String kernel
    }
    Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr
    The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument.
  }
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
      \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized.
    }
    Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.\cr
    \code{kpar} can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").
  }
  \item{tau}{the regularization parameter (default 0.01) }
  \item{reduced}{if set to \code{FALSE} the full linear problem of the lssvm is solved; when \code{TRUE} a reduced method using \code{csi} is used.}
  \item{rank}{the maximal rank of the decomposed kernel matrix, see \code{csi}}
  \item{delta}{number of columns of Cholesky performed in advance, see \code{csi} (default 40)}
  \item{tol}{tolerance of termination criterion for the \code{csi} function; lower tolerance leads to more precise approximation but may increase the training time and the decomposed matrix size (default: 0.0001)}
  \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')}
  \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression}
  \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)}
  \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)}
  \item{\dots}{ additional parameters}
}
\details{Least Squares Support Vector Machines are a reformulation of the standard SVM that leads to solving linear KKT systems. The algorithm is based on the minimization of a classical penalized least-squares cost function.
The current implementation approximates the kernel matrix by an incomplete Cholesky factorization obtained by the \code{\link{csi}} function; thus the solution is an approximation to the exact solution of the lssvm optimization problem. The quality of the solution depends on the approximation and can be influenced by the "rank", "delta", and "tol" parameters.
}
\value{
An S4 object of class \code{"lssvm"} containing the fitted model. Accessor functions can be used to access the slots of the object (see examples) which include:
\item{alpha}{the parameters of the \code{"lssvm"}}
\item{coef}{the model coefficients (identical to alpha)}
\item{b}{the model offset.}
\item{xmatrix}{the training data used by the model}
}
\references{
J. A. K. Suykens and J. Vandewalle\cr
\emph{Least Squares Support Vector Machine Classifiers}\cr
Neural Processing Letters vol. 9, issue 3, June 1999\cr
}
\author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{ksvm}}, \code{\link{gausspr}}, \code{\link{csi}} }
\examples{
## simple example
data(iris)

lir <- lssvm(Species~.,data=iris)
lir

lirr <- lssvm(Species~.,data= iris, reduced = FALSE)
lirr

## Using the kernelMatrix interface
iris <- unique(iris)
rbf <- rbfdot(0.5)
k <- kernelMatrix(rbf, as.matrix(iris[,-5]))

klir <- lssvm(k, iris[, 5])
klir

pre <- predict(klir, k)
}
\keyword{classif}
\keyword{nonlinear}
\keyword{methods}
kernlab/man/ksvm-class.Rd0000644000175100001440000001532112117364353015042 0ustar hornikusers\name{ksvm-class}
\docType{class}
\alias{ksvm-class}
\alias{SVindex}
\alias{alphaindex}
\alias{prob.model}
\alias{scaling}
\alias{prior}
\alias{show}
\alias{param}
\alias{b}
\alias{obj}
\alias{nSV}
\alias{coef,vm-method}
\alias{SVindex,ksvm-method}
\alias{alpha,ksvm-method}
\alias{alphaindex,ksvm-method}
\alias{cross,ksvm-method}
\alias{error,ksvm-method}
\alias{param,ksvm-method}
\alias{fitted,ksvm-method}
\alias{prior,ksvm-method}
\alias{prob.model,ksvm-method}
\alias{kernelf,ksvm-method}
\alias{kpar,ksvm-method}
\alias{lev,ksvm-method}
\alias{kcall,ksvm-method}
\alias{scaling,ksvm-method}
\alias{type,ksvm-method}
\alias{xmatrix,ksvm-method}
\alias{ymatrix,ksvm-method}
\alias{b,ksvm-method}
\alias{obj,ksvm-method}
\alias{nSV,ksvm-method}
\title{Class "ksvm" }
\description{An S4 class containing the output (model) of the \code{ksvm} Support Vector Machines function }
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("ksvm", ...)} or by calls to the \code{ksvm} function.
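A fitted object can be inspected either through the accessor methods listed below or by reading slots directly; a minimal sketch (the use of the iris data here is only an assumption for illustration):
\preformatted{
data(iris)
## train a ksvm model and query two of its slots
m <- ksvm(Species ~ ., data = iris)
identical(nSV(m), m@nSV)  # accessor and direct slot access agree
b(m)                      # the offset(s) of the model
}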
}
\section{Slots}{
  \describe{
    \item{\code{type}:}{Object of class \code{"character"} containing the support vector machine type ("C-svc", "nu-svc", "C-bsvc", "spoc-svc", "one-svc", "eps-svr", "nu-svr", "eps-bsvr")}
    \item{\code{param}:}{Object of class \code{"list"} containing the Support Vector Machine parameters (C, nu, epsilon)}
    \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function}
    \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel function parameters (hyperparameters)}
    \item{\code{kcall}:}{Object of class \code{"ANY"} containing the \code{ksvm} function call}
    \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling information performed on the data}
    \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula)}
    \item{\code{xmatrix}:}{Object of class \code{"input"} (\code{"list"} for multiclass problems or \code{"matrix"} for binary classification and regression problems) containing the support vectors calculated from the data matrix used during computations (possibly scaled and without NA). In the case of multi-class classification each list entry contains the support vectors from each binary classification problem from the one-against-one method.}
    \item{\code{ymatrix}:}{Object of class \code{"output"} the response \code{"matrix"} or \code{"factor"} or \code{"vector"} or \code{"logical"}}
    \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, predictions using the training set.}
    \item{\code{lev}:}{Object of class \code{"vector"} with the levels of the response (in the case of classification)}
    \item{\code{prob.model}:}{Object of class \code{"list"} with the class prob. model}
    \item{\code{prior}:}{Object of class \code{"list"} with the prior of the training set}
    \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in the case of classification)}
    \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector (\code{"list"} or \code{"matrix"} in case of multiclass classification) (support vectors)}
    \item{\code{coef}:}{Object of class \code{"ANY"} containing the resulting coefficients}
    \item{\code{alphaindex}:}{Object of class \code{"list"} containing the indexes of the non-zero alphas (support vectors)}
    \item{\code{b}:}{Object of class \code{"numeric"} containing the resulting offset }
    \item{\code{SVindex}:}{Object of class \code{"vector"} containing the indexes of the support vectors}
    \item{\code{nSV}:}{Object of class \code{"numeric"} containing the number of support vectors }
    \item{\code{obj}:}{Object of class \code{vector} containing the value of the objective function.
When using one-against-one in multiclass classification this is a vector.}
    \item{\code{error}:}{Object of class \code{"numeric"} containing the training error}
    \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross-validation error }
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA }
  }
}
\section{Methods}{
  \describe{
    \item{SVindex}{\code{signature(object = "ksvm")}: returns the indexes of support vectors}
    \item{alpha}{\code{signature(object = "ksvm")}: returns the complete alpha vector (with zero values)}
    \item{alphaindex}{\code{signature(object = "ksvm")}: returns the indexes of non-zero alphas (support vectors)}
    \item{cross}{\code{signature(object = "ksvm")}: returns the cross-validation error }
    \item{error}{\code{signature(object = "ksvm")}: returns the training error }
    \item{obj}{\code{signature(object = "ksvm")}: returns the value of the objective function}
    \item{fitted}{\code{signature(object = "vm")}: returns the fitted values (predict on training set) }
    \item{kernelf}{\code{signature(object = "ksvm")}: returns the kernel function}
    \item{kpar}{\code{signature(object = "ksvm")}: returns the kernel parameters (hyperparameters)}
    \item{lev}{\code{signature(object = "ksvm")}: returns the levels in case of classification }
    \item{prob.model}{\code{signature(object="ksvm")}: returns class prob. model values}
    \item{param}{\code{signature(object="ksvm")}: returns the parameters of the SVM in a list (C, epsilon, nu etc.)}
    \item{prior}{\code{signature(object="ksvm")}: returns the prior of the training set}
    \item{kcall}{\code{signature(object="ksvm")}: returns the \code{ksvm} function call}
    \item{scaling}{\code{signature(object = "ksvm")}: returns the scaling values }
    \item{show}{\code{signature(object = "ksvm")}: prints the object information}
    \item{type}{\code{signature(object = "ksvm")}: returns the problem type}
    \item{xmatrix}{\code{signature(object = "ksvm")}: returns the data matrix used}
    \item{ymatrix}{\code{signature(object = "ksvm")}: returns the response vector}
  }
}
\author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{ \code{\link{ksvm}}, \code{\link{rvm-class}}, \code{\link{gausspr-class}} }
\examples{
## simple example using the promotergene data set
data(promotergene)

## train a support vector machine
gene <- ksvm(Class~.,data=promotergene,kernel="rbfdot",
             kpar=list(sigma=0.015),C=50,cross=4)
gene

# the kernel function
kernelf(gene)
# the alpha values
alpha(gene)
# the coefficients
coef(gene)
# the fitted values
fitted(gene)
# the cross validation error
cross(gene)
}
\keyword{classes}
kernlab/man/kfa-class.Rd0000644000175100001440000000371511304023134014611 0ustar hornikusers\name{kfa-class}
\docType{class}
\alias{kfa-class}
\alias{alpha,kfa-method}
\alias{alphaindex,kfa-method}
\alias{kcall,kfa-method}
\alias{kernelf,kfa-method}
\alias{predict,kfa-method}
\alias{xmatrix,kfa-method}
\title{Class "kfa"}
\description{The class of the object returned by the Kernel Feature Analysis \code{kfa} function}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("kfa", ...)} or by calling the \code{kfa} method. The objects contain the features along with the alpha values.
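Since the \code{predict} method (below) embeds additional data points onto the extracted feature base, a typical round trip looks as follows (a sketch reusing the promotergene example from this page):
\preformatted{
data(promotergene)
f <- kfa(~ ., data = promotergene)
## embed the (training) points onto the selected feature base
emb <- predict(f, promotergene)
dim(emb)
}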
}
\section{Slots}{
  \describe{
    \item{\code{alpha}:}{Object of class \code{"matrix"} containing the alpha values }
    \item{\code{alphaindex}:}{Object of class \code{"vector"} containing the indexes of the selected feature}
    \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used}
    \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the selected features}
    \item{\code{kcall}:}{Object of class \code{"call"} containing the \code{kfa} function call}
    \item{\code{terms}:}{Object of class \code{"ANY"} containing the formula terms}
  }
}
\section{Methods}{
  \describe{
    \item{alpha}{\code{signature(object = "kfa")}: returns the alpha values }
    \item{alphaindex}{\code{signature(object = "kfa")}: returns the index of the selected features}
    \item{kcall}{\code{signature(object = "kfa")}: returns the function call }
    \item{kernelf}{\code{signature(object = "kfa")}: returns the kernel function used }
    \item{predict}{\code{signature(object = "kfa")}: used to embed more data points into the feature base}
    \item{xmatrix}{\code{signature(object = "kfa")}: returns the selected features. }
  }
}
\author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{kfa}}, \code{\link{kpca-class}} }
\examples{
data(promotergene)
f <- kfa(~.,data=promotergene)
}
\keyword{classes}
kernlab/man/lssvm-class.Rd0000644000175100001440000001040611304023134015211 0ustar hornikusers\name{lssvm-class}
\docType{class}
\alias{lssvm-class}
\alias{alpha,lssvm-method}
\alias{b,lssvm-method}
\alias{cross,lssvm-method}
\alias{error,lssvm-method}
\alias{kcall,lssvm-method}
\alias{kernelf,lssvm-method}
\alias{kpar,lssvm-method}
\alias{param,lssvm-method}
\alias{lev,lssvm-method}
\alias{type,lssvm-method}
\alias{alphaindex,lssvm-method}
\alias{xmatrix,lssvm-method}
\alias{ymatrix,lssvm-method}
\alias{scaling,lssvm-method}
\alias{nSV,lssvm-method}
\title{Class "lssvm"}
\description{The Least Squares SVM object}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("lssvm", ...)}
or by calling the \code{lssvm} function.
}
\section{Slots}{
  \describe{
    \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used}
    \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used }
    \item{\code{param}:}{Object of class \code{"list"} contains the regularization parameter used.}
    \item{\code{kcall}:}{Object of class \code{"call"} contains the used function call }
    \item{\code{type}:}{Object of class \code{"character"} contains type of problem }
    \item{\code{coef}:}{Object of class \code{"ANY"} contains the model parameter }
    \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)}
    \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used }
    \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix}
    \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values }
    \item{\code{b}:}{Object of class \code{"numeric"} containing the offset }
    \item{\code{lev}:}{Object of class \code{"vector"} containing the levels of the response (in case of classification) }
    \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling information performed on the data}
    \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in case of classification) }
    \item{\code{alpha}:}{Object of class \code{"listI"} containing the computed alpha values }
    \item{\code{alphaindex}}{Object of class \code{"list"} containing the indexes for the alphas in various classes (in multi-class problems).}
    \item{\code{error}:}{Object of class \code{"numeric"} containing the training error}
    \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error}
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA }
    \item{\code{nSV}:}{Object of class \code{"numeric"} containing the number of model parameters }
  }
}
\section{Methods}{
  \describe{
    \item{alpha}{\code{signature(object = "lssvm")}: returns the alpha vector}
    \item{cross}{\code{signature(object = "lssvm")}: returns the cross validation error }
    \item{error}{\code{signature(object = "lssvm")}: returns the training error }
    \item{fitted}{\code{signature(object = "vm")}: returns the fitted values }
    \item{kcall}{\code{signature(object = "lssvm")}: returns the call performed}
    \item{kernelf}{\code{signature(object = "lssvm")}: returns the kernel function used}
    \item{kpar}{\code{signature(object = "lssvm")}: returns the kernel parameter used}
    \item{param}{\code{signature(object = "lssvm")}: returns the regularization parameter used}
    \item{lev}{\code{signature(object = "lssvm")}: returns the response levels (in classification) }
    \item{type}{\code{signature(object = "lssvm")}: returns the type of problem}
    \item{scaling}{\code{signature(object = "lssvm")}: returns the scaling values }
    \item{xmatrix}{\code{signature(object = "lssvm")}: returns the data matrix used}
    \item{ymatrix}{\code{signature(object = "lssvm")}: returns the response matrix used}
  }
}
\author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{ \code{\link{lssvm}}, \code{\link{ksvm-class}} }
\examples{
# train model
data(iris)
test <- lssvm(Species~.,data=iris)
test
alpha(test)
error(test)
lev(test)
}
\keyword{classes}
kernlab/man/ranking.Rd0000644000175100001440000001252513561515153014413 0ustar hornikusers\name{ranking}
\alias{ranking}
\alias{ranking,matrix-method}
\alias{ranking,list-method}
\alias{ranking,kernelMatrix-method}
\title{Ranking}
\description{
A universal ranking algorithm which assigns importance/ranking to data points given a query.
}
\usage{
\S4method{ranking}{matrix}(x, y, kernel ="rbfdot", kpar = list(sigma = 1),
        scale = FALSE, alpha = 0.99, iterations = 600,
        edgegraph = FALSE, convergence = FALSE ,...)

\S4method{ranking}{kernelMatrix}(x, y, alpha = 0.99, iterations = 600,
        convergence = FALSE,...)

\S4method{ranking}{list}(x, y, kernel = "stringdot",
        kpar = list(length = 4, lambda = 0.5), alpha = 0.99,
        iterations = 600, convergence = FALSE, ...)
}
\arguments{
  \item{x}{a matrix containing the data to be ranked, or the kernel matrix of the data to be ranked, or a list of character vectors}
  \item{y}{The index of the query point in the data matrix, or a vector of length equal to the rows of the data matrix having a one at the index of the query point and zeros at all other points.}
  \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
    }
    The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument.
  }
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
    }
    Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.}
  \item{scale}{If TRUE the data matrix columns are scaled to zero mean and unit variance.}
  \item{alpha}{The \code{alpha} parameter takes values between 0 and 1 and is used to control the authoritative scores received from the unlabeled points. For 0, no global structure is found and the algorithm ranks the points similarly to the original distance metric.}
  \item{iterations}{Maximum number of iterations}
  \item{edgegraph}{Construct edgegraph (only supported with the RBF kernel)}
  \item{convergence}{Include convergence matrix in results}
  \item{\dots}{Additional arguments}
}
\details{
A simple universal ranking algorithm which exploits the intrinsic global geometric structure of the data. In many real world applications this should be superior to a local method in which the data are simply ranked by pairwise Euclidean distances. First, a weighted network is defined on the data and an authoritative score is assigned to each query.
The query points act as source nodes that continually pump their authoritative scores to the remaining points via the weighted network, and the remaining points further spread the scores they receive to their neighbors. This spreading process is repeated until convergence, and the points are ranked according to their score at the end of the iterations.
}
\value{
An S4 object of class \code{ranking} which extends the \code{matrix} class. The first column of the returned matrix contains the original index of the points in the data matrix, the second column contains the final score received by each point, and the third column the ranking of the point. The object contains the following slots:
\item{edgegraph}{Containing the edgegraph of the data points. }
\item{convergence}{Containing the convergence matrix}
}
\references{
D. Zhou, J. Weston, A. Gretton, O. Bousquet, B. Schoelkopf \cr
\emph{Ranking on Data Manifolds}\cr
Advances in Neural Information Processing Systems 16.\cr
MIT Press Cambridge Mass. 2004 \cr
\url{http://papers.nips.cc/paper/2447-ranking-on-data-manifolds/}
}
\author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{ \code{\link{ranking-class}}, \code{\link{specc}} }
\examples{
data(spirals)

## create data from spirals
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,]

## rank points according to similarity to the most upper left point
ranked <- ranking(ran, 54, kernel = "rbfdot",
                  kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
c <- 1:86
op <- par(mfrow = c(1, 2),pty="s")
plot(ran)
plot(ran, cex=c[ranked[,3]]/40)
}
\keyword{cluster}
\keyword{classif}
kernlab/man/predict.kqr.Rd0000644000175100001440000000214112117365174015203 0ustar hornikusers\name{predict.kqr}
\alias{predict.kqr}
\alias{predict,kqr-method}
\title{Predict method for kernel Quantile Regression object}
\description{Prediction of test data for kernel quantile regression}
\usage{
\S4method{predict}{kqr}(object, newdata)
}
\arguments{
  \item{object}{an S4 object of class \code{kqr} created by the \code{kqr} function}
  \item{newdata}{a data frame, matrix, or kernelMatrix containing new data}
}
\value{The value of the quantile given by the computed \code{kqr} model in a vector of length equal to the rows of \code{newdata}.
}
\author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\keyword{methods}
\keyword{regression}
\examples{
# create data
x <- sort(runif(300))
y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x)))

# first calculate the median
qrm <- kqr(x, y, tau = 0.5, C=0.15)

# predict and plot
plot(x, y)
ytest <- predict(qrm, x)
lines(x, ytest, col="blue")

# calculate 0.9 quantile
qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar= list(sigma=10), C=0.15)
ytest <- predict(qrm, x)
lines(x, ytest, col="red")
}
kernlab/man/promotergene.Rd0000644000175100001440000000310712117365235015464 0ustar hornikusers\name{promotergene}
\alias{promotergene}
\docType{data}
\title{E. coli promoter gene sequences (DNA)}
\description{
Promoters have a region where a protein (RNA polymerase) must make contact, and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters.
}
\usage{data(promotergene)}
\format{
A data frame with 106 observations and 58 variables. The first variable \code{Class} is a factor with levels \code{+} for a promoter gene and \code{-} for a non-promoter gene.
The remaining 57 variables \code{V2} to \code{V58} are factors describing the sequence. The DNA bases are coded as follows: \code{a} adenine, \code{c} cytosine, \code{g} guanine, \code{t} thymine.
}
\source{
UCI Machine Learning data repository \cr
\url{ftp://ftp.ics.uci.edu/pub/machine-learning-databases/molecular-biology/promoter-gene-sequences}
}
\references{
Towell, G., Shavlik, J. and Noordewier, M. \cr
\emph{Refinement of Approximate Domain Theories by Knowledge-Based Artificial Neural Networks.} \cr
In Proceedings of the Eighth National Conference on Artificial Intelligence (AAAI-90)
}
\examples{
data(promotergene)

## Create classification model using Gaussian Processes
prom <- gausspr(Class~.,data=promotergene,kernel="rbfdot",
                kpar=list(sigma=0.02),cross=4)
prom

## Create model using Support Vector Machines
promsv <- ksvm(Class~.,data=promotergene,kernel="laplacedot",
               kpar="automatic",C=60,cross=4)
promsv
}
\keyword{datasets}
kernlab/man/kha.Rd0000644000175100001440000001163213561515676013525 0ustar hornikusers\name{kha}
\alias{kha}
\alias{kha,formula-method}
\alias{kha,matrix-method}
\alias{predict,kha-method}
\encoding{latin1}
\title{Kernel Principal Components Analysis}
\description{
The Kernel Hebbian Algorithm is a nonlinear iterative algorithm for principal component analysis.}
\usage{
\S4method{kha}{formula}(x, data = NULL, na.action, ...)

\S4method{kha}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1),
    features = 5, eta = 0.005, th = 1e-4, maxiter = 10000,
    verbose = FALSE, na.action = na.omit, ...)
}
\arguments{
  \item{x}{The data matrix indexed by row or a formula describing the model. Note that an intercept is always included, whether given in the formula or not.}
  \item{data}{an optional data frame containing the variables in the model (when using a formula).}
  \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments (see \code{\link{kernels}}). \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
    }
    The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument.
  }
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
    }
    Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.}
  \item{features}{Number of features (principal components) to return.
(default: 5)}
  \item{eta}{The Hebbian learning rate (default : 0.005)}
  \item{th}{the smallest value of the convergence step (default : 0.0001) }
  \item{maxiter}{the maximum number of iterations.}
  \item{verbose}{print convergence every 100 iterations. (default : FALSE)}
  \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)}
  \item{\dots}{ additional parameters}
}
\details{The original form of KPCA can only be used on small data sets since it requires the estimation of the eigenvectors of a full kernel matrix. The Kernel Hebbian Algorithm iteratively estimates the Kernel Principal Components with only linear order memory complexity. (see ref. for more details)
}
\value{
An S4 object containing the principal component vectors along with the corresponding normalization values.
\item{pcv}{a matrix containing the principal component vectors (column wise)}
\item{eig}{The normalization values}
\item{xmatrix}{The original data matrix}

all the slots of the object can be accessed by accessor functions.
}
\note{The predict function can be used to embed new data onto the new space}
\references{Kwang In Kim, M.O. Franz and B. Schölkopf\cr
\emph{Kernel Hebbian Algorithm for Iterative Kernel Principal Component Analysis}\cr
Max-Planck-Institut für biologische Kybernetik, Tübingen (109)\cr
\url{http://www.is.tuebingen.mpg.de/fileadmin/user_upload/files/publications/pdf2302.pdf}
}
\author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{kpca}}, \code{\link{kfa}}, \code{\link{kcca}}, \code{pca}}
\examples{
# another example using the iris data
data(iris)
test <- sample(1:150,70)

kpc <- kha(~.,data=iris[-test,-5],kernel="rbfdot",
           kpar=list(sigma=0.2),features=2, eta=0.001, maxiter=65)

# print the principal component vectors
pcv(kpc)

# plot the data projection on the components
plot(predict(kpc,iris[,-5]),col=as.integer(iris[,5]),
     xlab="1st Principal Component",ylab="2nd Principal Component")
}
\keyword{cluster}
kernlab/man/kqr.Rd0000644000175100001440000002055212117365752013562 0ustar hornikusers\name{kqr}
\alias{kqr}
\alias{kqr,formula-method}
\alias{kqr,vector-method}
\alias{kqr,matrix-method}
\alias{kqr,list-method}
\alias{kqr,kernelMatrix-method}
\alias{coef,kqr-method}
\alias{show,kqr-method}
\title{Kernel Quantile Regression.}
\description{The Kernel Quantile Regression algorithm \code{kqr} performs non-parametric Quantile Regression.}
\usage{
\S4method{kqr}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE)

\S4method{kqr}{vector}(x,...)

\S4method{kqr}{matrix}(x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot",
    kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6,
    fit = TRUE, cross = 0, na.action = na.omit)

\S4method{kqr}{kernelMatrix}(x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0)

\S4method{kqr}{list}(x, y, tau = 0.5, C = 0.1, kernel = "stringdot",
    kpar = list(length=4, lambda=0.5), fit = TRUE, cross = 0)
}
\arguments{
  \item{x}{the data or a symbolic description of the model to be fit. When not using a formula \code{x} can be a matrix or vector containing the training data, or a kernel matrix of class \code{kernelMatrix} of the training data, or a list of character vectors (for use with the string kernel).
Note that the intercept is always excluded, whether given in the formula or not.}
  \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{kqr} is called from.}
  \item{y}{a numeric vector or a column matrix containing the response.}
  \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions. (default: TRUE)}
  \item{tau}{the quantile to be estimated; this is generally a number strictly between 0 and 1. For 0.5 the median is calculated. (default: 0.5)}
  \item{C}{the cost regularization parameter. This parameter controls the smoothness of the fitted function; essentially higher values for C lead to less smooth functions. (default: 0.1)}
  \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. \code{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
      \item \code{stringdot} String kernel
    }
    The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument.
  }
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
      \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized.
    }
    Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in 'sigest' to calculate a good 'sigma' value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").
  }
  \item{reduced}{use an incomplete Cholesky decomposition to calculate a decomposed form \eqn{Z} of the kernel matrix \eqn{K} (where \eqn{K = ZZ'}) and perform the calculations with \eqn{Z}. This might be useful when using \code{kqr} with large datasets since normally an \eqn{n \times n} kernel matrix would be computed.
Setting \code{reduced} to \code{TRUE} makes use of \code{csi} to compute a decomposed form instead, and thus only an \eqn{n \times m} matrix, where \eqn{m < n} and \eqn{n} is the sample size, is stored in memory (default: FALSE)}
  \item{rank}{the rank m of the decomposed matrix calculated when using an incomplete Cholesky decomposition. This parameter is only taken into account when \code{reduced} is \code{TRUE} (default : dim(x)[1]/6)}
  \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')}
  \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the pinball loss for quantile regression is computed}
  \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)}
  \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)}
  \item{\dots}{additional parameters.}
}
\details{In quantile regression a function is fitted to the data so that it satisfies the property that a portion \eqn{\tau} of the data \eqn{y} lies below the estimate. While the error bars of many regression problems can be viewed as such estimates, quantile regression estimates this quantity directly. Kernel quantile regression is similar to nu-Support Vector Regression in that it minimizes a regularized loss function in RKHS. The difference between nu-SVR and kernel quantile regression is in the type of loss function used, which in the case of quantile regression is the pinball loss (see reference for details). Minimizing the regularized loss boils down to a quadratic problem which is solved using an interior point QP solver \code{ipop} implemented in \code{kernlab}.
}
\value{
An S4 object of class \code{kqr} containing the fitted model along with information. Accessor functions can be used to access the slots of the object which include:
\item{alpha}{The resulting model parameters which can be also accessed by \code{coef}.}
\item{kernelf}{the kernel function used.}
\item{error}{Training error (if fit == TRUE)}
see \code{kqr-class} for more details.
}
\references{Ichiro Takeuchi, Quoc V. Le, Timothy D. Sears, Alexander J.
Smola\cr
\emph{Nonparametric Quantile Estimation}\cr
Journal of Machine Learning Research 7, 2006, 1231-1264 \cr
\url{http://www.jmlr.org/papers/volume7/takeuchi06a/takeuchi06a.pdf}
}
\author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{predict.kqr}}, \code{\link{kqr-class}}, \code{\link{ipop}}, \code{\link{rvm}}, \code{\link{ksvm}}}
\examples{
# create data
x <- sort(runif(300))
y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x)))

# first calculate the median
qrm <- kqr(x, y, tau = 0.5, C=0.15)

# predict and plot
plot(x, y)
ytest <- predict(qrm, x)
lines(x, ytest, col="blue")

# calculate 0.9 quantile
qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar= list(sigma=10), C=0.15)
ytest <- predict(qrm, x)
lines(x, ytest, col="red")

# calculate 0.1 quantile
qrm <- kqr(x, y, tau = 0.1,C=0.15)
ytest <- predict(qrm, x)
lines(x, ytest, col="green")

# print first 10 model coefficients
coef(qrm)[1:10]
}
\keyword{regression}
\keyword{nonlinear}
\keyword{methods}
kernlab/man/reuters.Rd0000644000175100001440000000111711304023134014430 0ustar hornikusers\name{reuters}
\alias{reuters}
\alias{rlabels}
\title{Reuters Text Data}
\description{A small sample from the Reuters news data set.}
\usage{data(reuters)}
\format{
A list of 40 text documents along with the labels. \code{reuters} contains the text documents and \code{rlabels} the labels in a vector.
}
\details{
This dataset contains a list of 40 text documents along with the labels. The data consist of 20 documents from the \code{acq} category and 20 documents from the \code{crude} category. The labels are stored in \code{rlabels}.
}
\source{Reuters}
\keyword{datasets}
kernlab/man/vm-class.Rd0000644000175100001440000000732511304023134014473 0ustar hornikusers\name{vm-class}
\docType{class}
\alias{vm-class}
\alias{cross}
\alias{alpha}
\alias{error}
\alias{type}
\alias{kernelf}
\alias{xmatrix}
\alias{ymatrix}
\alias{lev}
\alias{kcall}
\alias{alpha,vm-method}
\alias{cross,vm-method}
\alias{error,vm-method}
\alias{fitted,vm-method}
\alias{kernelf,vm-method}
\alias{kpar,vm-method}
\alias{lev,vm-method}
\alias{kcall,vm-method}
\alias{type,vm-method}
\alias{xmatrix,vm-method}
\alias{ymatrix,vm-method}
\title{Class "vm" }
\description{An S4 VIRTUAL class used as a base for the various vector machine classes in \pkg{kernlab}}
\section{Objects from the Class}{
Objects from the class cannot be created directly but only contained in other classes.
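For instance, a fitted \code{ksvm} model is also a \code{"vm"} object, so the generic accessors defined on this class apply to it (a small sketch; the iris data and \code{ksvm} are used purely for illustration):
\preformatted{
data(iris)
m <- ksvm(Species ~ ., data = iris)
is(m, "vm")   # TRUE: ksvm objects extend the virtual class vm
error(m)      # accessor defined on vm and inherited by its subclasses
}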
}
\section{Slots}{
  \describe{
    \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector (list in case of multiclass classification) (support vectors)}
    \item{\code{type}:}{Object of class \code{"character"} containing the vector machine type e.g., ("C-svc", "nu-svc", "C-bsvc", "spoc-svc", "one-svc", "eps-svr", "nu-svr", "eps-bsvr")}
    \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function}
    \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel function parameters (hyperparameters)}
    \item{\code{kcall}:}{Object of class \code{"call"} containing the function call}
    \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula)}
    \item{\code{xmatrix}:}{Object of class \code{"input"} the data matrix used during computations (support vectors) (possibly scaled and without NA)}
    \item{\code{ymatrix}:}{Object of class \code{"output"} the response matrix/vector }
    \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, predictions using the training set.}
    \item{\code{lev}:}{Object of class \code{"vector"} with the levels of the response (in the case of classification)}
    \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in the case of classification)}
    \item{\code{error}:}{Object of class \code{"vector"} containing the training error}
    \item{\code{cross}:}{Object of class \code{"vector"} containing the cross-validation error }
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA }
  }
}
\section{Methods}{
  \describe{
    \item{alpha}{\code{signature(object = "vm")}: returns the complete alpha vector (with zero values)}
    \item{cross}{\code{signature(object = "vm")}: returns the cross-validation error }
    \item{error}{\code{signature(object = "vm")}: returns the training error }
    \item{fitted}{\code{signature(object = "vm")}: returns the fitted values (predict on training set) }
    \item{kernelf}{\code{signature(object = "vm")}: returns the kernel function}
    \item{kpar}{\code{signature(object = "vm")}: returns the kernel parameters (hyperparameters)}
    \item{lev}{\code{signature(object = "vm")}: returns the levels in case of classification }
    \item{kcall}{\code{signature(object="vm")}: returns the function call}
    \item{type}{\code{signature(object = "vm")}: returns the problem type}
    \item{xmatrix}{\code{signature(object = "vm")}: returns the data matrix used (support vectors)}
    \item{ymatrix}{\code{signature(object = "vm")}: returns the response vector}
  }
}
\author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{ \code{\link{ksvm-class}}, \code{\link{rvm-class}}, \code{\link{gausspr-class}} }
\keyword{classes}
kernlab/man/kcca.Rd0000644000175100001440000000724313561515565013661 0ustar hornikusers\name{kcca}
\alias{kcca}
\alias{kcca,matrix-method}
\title{Kernel Canonical Correlation Analysis}
\description{
Computes the canonical correlation analysis in feature space.
}
\usage{
\S4method{kcca}{matrix}(x, y, kernel="rbfdot", kpar=list(sigma=0.1),
    gamma = 0.1, ncomps = 10, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{a matrix containing data indexed by row}
  \item{y}{a matrix containing data indexed by row}
  \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments.
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
    }
    The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument.
  }
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are :
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
    }
    Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.}
  \item{gamma}{regularization parameter (default : 0.1)}
  \item{ncomps}{number of canonical components (default : 10) }
  \item{\dots}{additional parameters for the \code{kpca} function}
}
\details{
Kernel Canonical Correlation Analysis (KCCA) is the kernel version of canonical correlation analysis, a non-linear extension of CCA. Given two random variables, KCCA aims at extracting the information which is shared by the two random variables. More precisely, given \eqn{x} and \eqn{y}, the purpose of KCCA is to provide nonlinear mappings \eqn{f(x)} and \eqn{g(y)} such that their correlation is maximized.
}
\value{
An S4 object containing the following slots:
\item{kcor}{Correlation coefficients in feature space}
\item{xcoef}{estimated coefficients for the \code{x} variables in the feature space}
\item{ycoef}{estimated coefficients for the \code{y} variables in the feature space}
%% \item{xvar}{The canonical variates for \code{x}}
%% \item{yvar}{The canonical variates for \code{y}}
}
\references{
Malte Kuss, Thore Graepel \cr
\emph{The Geometry Of Kernel Canonical Correlation Analysis}\cr
\url{https://www.microsoft.com/en-us/research/publication/the-geometry-of-kernel-canonical-correlation-analysis/}}
\author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} }
\seealso{\code{\link{cancor}}, \code{\link{kpca}}, \code{\link{kfa}}, \code{\link{kha}}}
\examples{
## dummy data
x <- matrix(rnorm(30),15)
y <- matrix(rnorm(30),15)

kcca(x,y,ncomps=2)
}
\keyword{multivariate}
kernlab/man/gausspr.Rd0000644000175100001440000001661412560371302014443 0ustar hornikusers\name{gausspr}
\alias{gausspr}
\alias{gausspr,formula-method}
\alias{gausspr,vector-method}
\alias{gausspr,matrix-method}
\alias{coef,gausspr-method}
\alias{show,gausspr-method}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Gaussian processes for regression and classification}
\description{
\code{gausspr} is an implementation of Gaussian processes for classification and regression.
}
\usage{
\S4method{gausspr}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE)

\S4method{gausspr}{vector}(x,...)
\S4method{gausspr}{matrix}(x, y, scaled = TRUE, type= NULL, kernel="rbfdot", kpar="automatic", var=1, variance.model = FALSE, tol=0.0005, cross=0, fit=TRUE, ... , subset, na.action = na.omit) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{a symbolic description of the model to be fit or a matrix or vector when a formula interface is not used. When not using a formula x is a matrix or vector containing the variables in the model} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `gausspr' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{type}{Type of problem. Either "classification" or "regression". Depending on whether \code{y} is a factor or not, the default setting for \code{type} is \code{classification} or \code{regression}, respectively, but can be overwritten by setting an explicit value.\cr} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". 
}
    Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.}
  \item{var}{the initial noise variance (only for regression) (default: 1)}
  \item{variance.model}{build model for variance or standard deviation estimation (only for regression) (default : FALSE)}
  \item{tol}{tolerance of termination criterion (default: 0.0005)}
  \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')}
  \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression}
  \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)}
  \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)}
  \item{\dots}{ additional parameters}
}
\details{
A Gaussian process is specified by a mean and a covariance function. The mean is a function of \eqn{x} (which is often the zero function), and the covariance is a function \eqn{C(x,x')} which expresses the expected covariance between the value of the function \eqn{y} at the points \eqn{x} and \eqn{x'}. The actual function \eqn{y(x)} in any data modeling problem is assumed to be a single sample from this Gaussian distribution. Laplace approximation is used for the parameter estimation in Gaussian processes for classification.\cr

The predict function can return class probabilities for classification problems by setting the \code{type} parameter to "probabilities". For regression, setting the \code{type} parameter to "variance" or "sdeviation" returns the estimated variance or standard deviation at each predicted point.
}
\value{
An S4 object of class "gausspr" containing the fitted model along with information. Accessor functions can be used to access the slots of the object which include:
\item{alpha}{The resulting model parameters}
\item{error}{Training error (if fit == TRUE)}
}
\references{
C. K. I. Williams and D. Barber \cr
Bayesian classification with Gaussian processes.
    \cr
    IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(12):1342-1351, 1998\cr
    \url{http://www.dai.ed.ac.uk/homes/ckiw/postscript/pami_final.ps.gz}
}
\author{Alexandros Karatzoglou \cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\seealso{\code{\link{predict.gausspr}}, \code{\link{rvm}}, \code{\link{ksvm}}, \code{\link{gausspr-class}}, \code{\link{lssvm}} }

\examples{
# train model
data(iris)
test <- gausspr(Species~.,data=iris,var=2)
test
alpha(test)

# predict on the training set
predict(test,iris[,-5])
# class probabilities
predict(test, iris[,-5], type="probabilities")

# create regression data
x <- seq(-20,20,0.1)
y <- sin(x)/x + rnorm(401,sd=0.03)

# regression with Gaussian processes
foo <- gausspr(x, y)
foo

# predict and plot
ytest <- predict(foo, x)
plot(x, y, type = "l")
lines(x, ytest, col="red")


# predict and plot the variance bands
x <- c(-4, -3, -2, -1, 0, 0.5, 1, 2)
y <- c(-2, 0, -0.5, 1, 2, 1, 0, -1)
plot(x, y)
foo2 <- gausspr(x, y, variance.model = TRUE)
xtest <- seq(-4,2,0.2)

lines(xtest, predict(foo2, xtest))
lines(xtest,
      predict(foo2, xtest) + 2*predict(foo2, xtest, type="sdeviation"),
      col="red")
lines(xtest,
      predict(foo2, xtest) - 2*predict(foo2, xtest, type="sdeviation"),
      col="red")

}
\keyword{classif}
\keyword{regression}
\keyword{nonlinear}
\keyword{methods}
kernlab/man/onlearn-class.Rd0000644000175100001440000000672412117365114015523 0ustar hornikusers\name{onlearn-class}
\docType{class}
\alias{onlearn-class}
\alias{alpha,onlearn-method}
\alias{b,onlearn-method}
\alias{buffer,onlearn-method}
\alias{fit,onlearn-method}
\alias{kernelf,onlearn-method}
\alias{kpar,onlearn-method}
\alias{predict,onlearn-method}
\alias{rho,onlearn-method}
\alias{rho}
\alias{show,onlearn-method}
\alias{type,onlearn-method}
\alias{xmatrix,onlearn-method}
\alias{buffer}

\title{Class "onlearn"}
\description{ The class of objects used by the kernel-based online
  learning algorithms}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("onlearn", ...)}
or by calls to the function \code{inlearn}.
}
\section{Slots}{
  \describe{
    \item{\code{kernelf}:}{Object of class \code{"function"} containing
      the kernel function used}
    \item{\code{buffer}:}{Object of class \code{"numeric"} containing
      the size of the buffer}
    \item{\code{kpar}:}{Object of class \code{"list"} containing the
      hyperparameters of the kernel function.}
    \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing
      the data points (similar to support vectors) }
    \item{\code{fit}:}{Object of class \code{"numeric"} containing the
      decision function value of the last data point}
    \item{\code{onstart}:}{Object of class \code{"numeric"} used for indexing }
    \item{\code{onstop}:}{Object of class \code{"numeric"} used for indexing}
    \item{\code{alpha}:}{Object of class \code{"ANY"} containing
      the model parameters}
    \item{\code{rho}:}{Object of class \code{"numeric"} containing a
      model parameter}
    \item{\code{b}:}{Object of class \code{"numeric"} containing the offset}
    \item{\code{pattern}:}{Object of class \code{"factor"} used for
      dealing with factors}
    \item{\code{type}:}{Object of class \code{"character"} containing
      the problem type (classification, regression, or novelty)}
  }
}
\section{Methods}{
  \describe{
    \item{alpha}{\code{signature(object = "onlearn")}: returns the model
      parameters}
    \item{b}{\code{signature(object = "onlearn")}: returns the offset }
    \item{buffer}{\code{signature(object = "onlearn")}: returns the
      buffer size}
    \item{fit}{\code{signature(object = "onlearn")}: returns the last
      decision function value}
    \item{kernelf}{\code{signature(object = "onlearn")}: returns the
      kernel function used}
    \item{kpar}{\code{signature(object = "onlearn")}: returns the
      hyper-parameters used}
    \item{onlearn}{\code{signature(obj = "onlearn")}: the learning function}
    \item{predict}{\code{signature(object = "onlearn")}: the predict function}
    \item{rho}{\code{signature(object = "onlearn")}: returns the model parameter}
    \item{show}{\code{signature(object = "onlearn")}: show function}
    \item{type}{\code{signature(object = "onlearn")}: returns the type
      of problem}
    \item{xmatrix}{\code{signature(object = "onlearn")}: returns the
      stored data points}
  }
}
\author{Alexandros Karatzoglou\cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\seealso{
  \code{\link{onlearn}}, \code{\link{inlearn}}
}
\examples{

## create toy data set
x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2))
y <- matrix(c(rep(1,50),rep(-1,50)),,1)

## initialize onlearn object
on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),
              type="classification")

## learn one data point at a time
for(i in sample(1:100,100))
on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1)

sign(predict(on,x))
}
\keyword{classes}
kernlab/man/couple.Rd0000644000175100001440000000363211304023134014232 0ustar hornikusers\name{couple}
\alias{couple}
\title{Probabilities Coupling function}
\description{
  \code{couple} is used to link class-probability estimates produced by
  pairwise coupling in multi-class classification problems.
}
\usage{
couple(probin, coupler = "minpair")
}
\arguments{
  \item{probin}{ The pairwise coupled class-probability estimates}
  \item{coupler}{The type of coupler to use. Currently \code{minpair},
    \code{pkpd} and \code{vote} are supported (see reference for more
    details). If \code{vote} is selected, the returned value is a
    primitive estimate based on the given votes.}
}
\details{
  As binary classification problems are much easier to solve, many
  techniques exist to decompose multi-class classification problems into
  many binary classification problems (voting, error codes, etc.).
  Pairwise coupling (one against one) constructs a rule for
  discriminating between every pair of classes and then selects the
  class with the most winning two-class decisions.
  By using Platt's probabilistic outputs for SVMs one can get a class
  probability for each of the \eqn{k(k-1)/2} models created in the pairwise
  classification. The couple method implements various techniques to combine
  these probabilities.
}
\value{
  A matrix with the resulting probability estimates.
}
\references{
  Ting-Fan Wu, Chih-Jen Lin, Ruby C. Weng\cr
  \emph{Probability Estimates for Multi-class Classification by Pairwise
    Coupling}\cr
  Neural Information Processing Systems 2003 \cr
  \url{http://books.nips.cc/papers/files/nips16/NIPS2003_0538.pdf}
}
\author{Alexandros Karatzoglou \cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at} }

\seealso{ \code{\link{predict.ksvm}}, \code{\link{ksvm}}}
\examples{
## create artificial pairwise probabilities
pairs <- matrix(c(0.82,0.12,0.76,0.1,0.9,0.05),2)

couple(pairs)

couple(pairs, coupler="pkpd")

couple(pairs, coupler = "vote")
}
\keyword{classif}
kernlab/man/plot.Rd0000644000175100001440000000216511304023134013721 0ustar hornikusers\name{plot}
\alias{plot.ksvm}
\alias{plot,ksvm,missing-method}
\alias{plot,ksvm-method}
\title{plot method for support vector object}


\description{Plot a binary classification support vector machine
  object. The \code{plot} function returns a contour plot of the
  decision values. }

\usage{
\S4method{plot}{ksvm}(object, data=NULL, grid = 50, slice = list())
}

\arguments{

  \item{object}{a \code{ksvm} classification object created by the
    \code{ksvm} function}
  \item{data}{a data frame or matrix containing data to be plotted}
  \item{grid}{granularity for the contour plot.}
  \item{slice}{a list of named numeric values for the dimensions held
    constant (only needed if more than two variables are used). Dimensions
    not specified are fixed at 0.
  }
}
\seealso{\code{\link{ksvm}}}

\author{Alexandros Karatzoglou\cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\keyword{methods}
\keyword{regression}
\keyword{classif}


\examples{
## Demo of the plot function
x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2))
y <- matrix(c(rep(1,60),rep(-1,60)))

svp <- ksvm(x,y,type="C-svc")
plot(svp,data=x)

}
kernlab/man/prc-class.Rd0000644000175100001440000000353311304023134014632 0ustar hornikusers\name{prc-class}
\docType{class}
\alias{prc-class}
\alias{eig}
\alias{pcv}
\alias{eig,prc-method}
\alias{kcall,prc-method}
\alias{kernelf,prc-method}
\alias{pcv,prc-method}
\alias{xmatrix,prc-method}

\title{Class "prc"}
\description{Principal Components Class}
\section{Objects of class "prc"}{Objects from the class cannot be created
  directly but only contained in other classes.}
\section{Slots}{
  \describe{
    \item{\code{pcv}:}{Object of class \code{"matrix"} containing the
      principal component vectors }
    \item{\code{eig}:}{Object of class \code{"vector"} containing the
      corresponding eigenvalues}
    \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing
      the kernel function used}
    \item{\code{kpar}:}{Object of class \code{"list"} containing the
      kernel parameters used }
    \item{\code{xmatrix}:}{Object of class \code{"input"} containing
      the data matrix used }
    \item{\code{kcall}:}{Object of class \code{"ANY"} containing the
      function call }
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the
      action performed on NAs }
  }
}
\section{Methods}{
  \describe{
    \item{eig}{\code{signature(object = "prc")}: returns the eigenvalues }
    \item{kcall}{\code{signature(object = "prc")}: returns the performed
      call}
    \item{kernelf}{\code{signature(object = "prc")}: returns the used
      kernel function}
    \item{pcv}{\code{signature(object = "prc")}: returns the principal
      component vectors }
    \item{predict}{\code{signature(object = "prc")}: embeds new data }
    \item{xmatrix}{\code{signature(object = "prc")}: returns the used
      data matrix }
  }
}
\author{Alexandros Karatzoglou\cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\seealso{
  \code{\link{kpca-class}},\code{\link{kha-class}}, \code{\link{kfa-class}}
}
\keyword{classes}
kernlab/man/predict.ksvm.Rd0000644000175100001440000000511412560430652015365 0ustar hornikusers\name{predict.ksvm}
\alias{predict.ksvm}
\alias{predict,ksvm-method}
\title{predict method for support vector object}


\description{Prediction of test data using support vector machines}


\usage{
\S4method{predict}{ksvm}(object, newdata, type = "response", coupler = "minpair")
}

\arguments{

  \item{object}{an S4 object of class \code{ksvm} created by the
    \code{ksvm} function}
  \item{newdata}{a data frame or matrix containing new data}
  \item{type}{one of \code{response}, \code{probabilities},
    \code{votes}, \code{decision}
    indicating the type of output: predicted values, matrix of class
    probabilities, matrix of vote counts, or matrix of decision values.}
  \item{coupler}{Coupling method used in the multiclass case, can be one
    of \code{minpair} or \code{pkpd} (see reference for more details).}
}

\value{
  If \code{type(object)} is \code{C-svc},
  \code{nu-svc}, \code{C-bsvc} or \code{spoc-svc}
  the vector returned depends on the argument \code{type}:

  \item{response}{predicted classes (the classes with majority vote).}

  \item{probabilities}{matrix of class probabilities (one column for each class
    and one row for each input).}

  \item{votes}{matrix of vote counts (one column for each class and one
    row for each new input)}

  If \code{type(object)} is \code{eps-svr}, \code{eps-bsvr} or
  \code{nu-svr} a vector of predicted values is returned.
  If \code{type(object)} is \code{one-classification} a vector of
  logical values is returned.
}

\references{
  \itemize{
    \item
      T.F. Wu, C.J. Lin, R.C. Weng. \cr
      \emph{Probability estimates for Multi-class Classification by
	Pairwise Coupling}\cr
      \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf}

    \item
      H.T. Lin, C.J. Lin, R.C. Weng\cr
      \emph{A note on Platt's probabilistic outputs for support vector
	machines}\cr
      \url{http://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}
  }
}
\author{Alexandros Karatzoglou\cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\keyword{methods}
\keyword{regression}
\keyword{classif}


\examples{

## example using the promotergene data set
data(promotergene)

## create test and training set
ind <- sample(1:dim(promotergene)[1],20)
genetrain <- promotergene[-ind, ]
genetest <- promotergene[ind, ]

## train a support vector machine
gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",
             kpar=list(sigma=0.015),C=70,cross=4,prob.model=TRUE)
gene

## predict gene type probabilities on the test set
genetype <- predict(gene,genetest,type="probabilities")
genetype
}
kernlab/man/kqr-class.Rd0000644000175100001440000001051412117363316014654 0ustar hornikusers\name{kqr-class}
\docType{class}
\alias{kqr-class}
\alias{alpha,kqr-method}
\alias{cross,kqr-method}
\alias{error,kqr-method}
\alias{kcall,kqr-method}
\alias{kernelf,kqr-method}
\alias{kpar,kqr-method}
\alias{param,kqr-method}
\alias{alphaindex,kqr-method}
\alias{b,kqr-method}
\alias{xmatrix,kqr-method}
\alias{ymatrix,kqr-method}
\alias{scaling,kqr-method}

\title{Class "kqr"}
\description{The Kernel Quantile Regression object class}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("kqr", ...)}
or by calling the \code{kqr} function.
}
\section{Slots}{
  \describe{
    \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains
      the kernel function used}
    \item{\code{kpar}:}{Object of class \code{"list"} contains the
      kernel parameters used }
    \item{\code{coef}:}{Object of class \code{"ANY"} containing the
      model parameters}
    \item{\code{param}:}{Object of class \code{"list"} contains the
      cost parameter C and tau parameter used }
    \item{\code{kcall}:}{Object of class \code{"list"} contains the
      function call used }
    \item{\code{terms}:}{Object of class \code{"ANY"} contains the
      terms representation of the symbolic model used (when using a formula)}
    \item{\code{xmatrix}:}{Object of class \code{"input"} containing
      the data matrix used }
    \item{\code{ymatrix}:}{Object of class \code{"output"} containing the
      response matrix}
    \item{\code{fitted}:}{Object of class \code{"output"} containing the
      fitted values }
    \item{\code{alpha}:}{Object of class \code{"listI"} containing the
      computed alpha values }
    \item{\code{b}:}{Object of class \code{"numeric"} containing the
      offset of the model.}
    \item{\code{scaling}:}{Object of class \code{"ANY"} containing the
      scaling coefficients of the data (when \code{scaled = TRUE} is used).}
    \item{\code{error}:}{Object of class \code{"numeric"} containing the
      training error}
    \item{\code{cross}:}{Object of class \code{"numeric"} containing the
      cross validation error}
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the
      action performed on NAs }
    \item{\code{nclass}:}{Inherited from class \code{vm}, not used in kqr}
    \item{\code{lev}:}{Inherited from class \code{vm}, not used in kqr}
    \item{\code{type}:}{Inherited from class \code{vm}, not used in kqr}
  }
}
\section{Methods}{
  \describe{
    \item{coef}{\code{signature(object = "kqr")}: returns the
      coefficients (alpha) of the model}
    \item{alpha}{\code{signature(object = "kqr")}: returns the alpha
      vector (identical to \code{coef})}
    \item{b}{\code{signature(object = "kqr")}: returns the offset beta
      of the model.}
    \item{cross}{\code{signature(object = "kqr")}: returns the cross
      validation error }
    \item{error}{\code{signature(object = "kqr")}: returns the
      training error }
    \item{fitted}{\code{signature(object = "vm")}: returns the fitted values }
    \item{kcall}{\code{signature(object = "kqr")}: returns the call performed}
    \item{kernelf}{\code{signature(object = "kqr")}: returns the
      kernel function used}
    \item{kpar}{\code{signature(object = "kqr")}: returns the kernel
      parameter used}
    \item{param}{\code{signature(object = "kqr")}: returns the
      cost regularization parameter C and tau used}
    \item{xmatrix}{\code{signature(object = "kqr")}: returns the
      data matrix used}
    \item{ymatrix}{\code{signature(object = "kqr")}: returns the
      response matrix used}
    \item{scaling}{\code{signature(object = "kqr")}: returns the
      scaling coefficients of the data (when \code{scaled = TRUE} is used)}
  }
}
\author{Alexandros Karatzoglou\cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\seealso{
  \code{\link{kqr}},
  \code{\link{vm-class}},
  \code{\link{ksvm-class}}
}
\examples{

# create data
x <- sort(runif(300))
y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x)))

# first calculate the median
qrm <- kqr(x, y, tau = 0.5, C=0.15)

# predict and plot
plot(x, y)
ytest <- predict(qrm, x)
lines(x, ytest, col="blue")

# calculate 0.9 quantile
qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot",
           kpar = list(sigma = 10), C = 0.15)
ytest <- predict(qrm, x)
lines(x, ytest, col="red")

# print model coefficients and other information
coef(qrm)
b(qrm)
error(qrm)
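# editor's addition (not part of the original example): a quick
# calibration check for the tau = 0.9 fit above; for a well-calibrated
# 0.9 quantile roughly 90 percent of the responses should lie below
# the fitted curve
mean(y <= ytest)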
kernelf(qrm)
}
\keyword{classes}
kernlab/man/kkmeans.Rd0000644000175100001440000001345313271635323014412 0ustar hornikusers\name{kkmeans}
\alias{kkmeans}
\alias{kkmeans,matrix-method}
\alias{kkmeans,formula-method}
\alias{kkmeans,list-method}
\alias{kkmeans,kernelMatrix-method}

\title{Kernel k-means}
\description{
  A weighted kernel version of the famous k-means algorithm.
}
\usage{

\S4method{kkmeans}{formula}(x, data = NULL, na.action = na.omit, ...)

\S4method{kkmeans}{matrix}(x, centers, kernel = "rbfdot", kpar = "automatic",
        alg="kkmeans", p=1, na.action = na.omit, ...)

\S4method{kkmeans}{kernelMatrix}(x, centers, ...)

\S4method{kkmeans}{list}(x, centers, kernel = "stringdot",
        kpar = list(length=4, lambda=0.5),
        alg ="kkmeans", p = 1, na.action = na.omit, ...)
}

\arguments{
  \item{x}{the matrix of data to be clustered, or a symbolic
    description of the model to be fit, or a kernel Matrix of class
    \code{kernelMatrix}, or a list of character vectors.}

  \item{data}{an optional data frame containing the variables in the model.
    By default the variables are taken from the environment from which
    `kkmeans' is called.}

  \item{centers}{Either the number of clusters or a matrix of initial
    cluster centers. If the former, a random initial partitioning is used.}

  \item{kernel}{the kernel function used in training and predicting.
    This parameter can be set to any function, of class kernel, which
    computes an inner product in feature space between two
    vector arguments (see \code{\link{kernels}}).
    \pkg{kernlab} provides the most popular kernel functions
    which can be used by setting the kernel parameter to the following
    strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel "Gaussian"
      \item \code{polydot} Polynomial kernel
      \item \code{vanilladot} Linear kernel
      \item \code{tanhdot} Hyperbolic tangent kernel
      \item \code{laplacedot} Laplacian kernel
      \item \code{besseldot} Bessel kernel
      \item \code{anovadot} ANOVA RBF kernel
      \item \code{splinedot} Spline kernel
      \item \code{stringdot} String kernel
    }

    Setting the kernel parameter to "matrix" treats \code{x} as a kernel
    matrix, calling the \code{kernelMatrix} interface.\cr

    The kernel parameter can also be set to a user defined function of
    class kernel by passing the function name as an argument.
  }

  \item{kpar}{a character string or the list of hyper-parameters (kernel parameters).
    The default character string \code{"automatic"} uses a heuristic to
    determine a suitable value for the width parameter of the RBF kernel.\cr

    A list can also be used containing the parameters to be used with the
    kernel function. Valid parameters for existing kernels are :
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis
      kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel
      function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
      \item \code{length, lambda, normalized} for the "stringdot" kernel
      where length is the length of the strings considered, lambda the
      decay factor and normalized a logical parameter determining if the
      kernel evaluations should be normalized.
    }

    Hyper-parameters for user defined kernels can be passed through the
    kpar parameter as well.}

  \item{alg}{the algorithm to use. Options currently include
    \code{kkmeans} and \code{kerninghan}.
  }

  \item{p}{a parameter used to keep the affinity matrix positive semidefinite}

  \item{na.action}{The action to perform on NAs}

  \item{\dots}{additional parameters}

}
\details{
  \code{kernel k-means} uses the 'kernel trick' (i.e. implicitly projecting all data
  into a non-linear feature space with the use of a kernel) in order to
  deal with one of the major drawbacks of \code{k-means}, namely that it
  cannot capture clusters that are not linearly separable in input
  space. \cr
  The algorithm is implemented using the triangle inequality to avoid
  unnecessary and computationally expensive distance calculations. This
  leads to a significant speedup, particularly on large data sets with a
  high number of clusters. \cr
  With a particular choice of weights this algorithm becomes
  equivalent to the Kernighan-Lin and the norm-cut graph partitioning
  algorithms. \cr
  The data can be passed to the \code{kkmeans} function in a \code{matrix} or a
  \code{data.frame}; in addition, \code{kkmeans} also supports input in
  the form of a kernel matrix of class \code{kernelMatrix} or of a list
  of character vectors, in which case a string kernel has to be used
  (e.g. for text clustering).
}
\value{
  An S4 object of class \code{specc} which extends the class \code{vector}
  containing integers indicating the cluster to which
  each point is allocated. The following slots contain useful information:

  \item{centers}{A matrix of cluster centers.}
  \item{size}{The number of points in each cluster}
  \item{withinss}{The within-cluster sum of squares for each cluster}
  \item{kernelf}{The kernel function used}
}
\references{
  Inderjit Dhillon, Yuqiang Guan, Brian Kulis\cr
  A Unified View of Kernel k-means, Spectral Clustering and Graph
  Partitioning\cr
  UTCS Technical Report\cr
  \url{http://people.bu.edu/bkulis/pubs/spectral_techreport.pdf}
}
\author{ Alexandros Karatzoglou \cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}


\seealso{\code{\link{specc}}, \code{\link{kpca}}, \code{\link{kcca}} }
\examples{
## Cluster the iris data set.
data(iris)

sc <- kkmeans(as.matrix(iris[,-5]), centers=3)

sc
centers(sc)
size(sc)
withinss(sc)


}
\keyword{cluster}
kernlab/man/csi-class.Rd0000644000175100001440000000545411304023134014630 0ustar hornikusers\name{csi-class}
\docType{class}
\alias{csi-class}
\alias{Q}
\alias{R}
\alias{predgain}
\alias{truegain}
\alias{diagresidues,csi-method}
\alias{maxresiduals,csi-method}
\alias{pivots,csi-method}
\alias{predgain,csi-method}
\alias{truegain,csi-method}
\alias{Q,csi-method}
\alias{R,csi-method}

\title{Class "csi"}
\description{The reduced Cholesky decomposition object}
\section{Objects from the Class}{Objects can be created by calls of the
  form \code{new("csi", ...)}
  or by calling the \code{csi} function.}
\section{Slots}{
  \describe{
    \item{\code{.Data}:}{Object of class \code{"matrix"} contains
      the decomposed matrix}
    \item{\code{pivots}:}{Object of class \code{"vector"} contains
      the pivots performed}
    \item{\code{diagresidues}:}{Object of class \code{"vector"} contains
      the diagonal residues}
    \item{\code{maxresiduals}:}{Object of class \code{"vector"} contains
      the maximum residues}
    \item{\code{predgain}:}{Object of class \code{"vector"} contains the
      predicted gain before adding each column}
    \item{\code{truegain}:}{Object of class \code{"vector"} contains the
      actual gain after adding each column}
    \item{\code{Q}:}{Object of class \code{"matrix"} contains Q from the
      QR decomposition of the kernel matrix}
    \item{\code{R}:}{Object of class \code{"matrix"} contains R from the
      QR decomposition of the kernel matrix}
  }
}
\section{Extends}{
  Class \code{"matrix"}, directly.
}
\section{Methods}{
  \describe{
    \item{diagresidues}{\code{signature(object = "csi")}: returns
      the diagonal residues}
    \item{maxresiduals}{\code{signature(object = "csi")}: returns
      the maximum residues}
    \item{pivots}{\code{signature(object = "csi")}: returns
      the pivots performed}
    \item{predgain}{\code{signature(object = "csi")}: returns
      the predicted gain before adding each column}
    \item{truegain}{\code{signature(object = "csi")}: returns
      the actual gain after adding each column}
    \item{Q}{\code{signature(object = "csi")}: returns
      Q from the QR decomposition of the kernel matrix}
    \item{R}{\code{signature(object = "csi")}: returns
      R from the QR decomposition of the kernel matrix}
  }
}
\author{Alexandros Karatzoglou\cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\seealso{
  \code{\link{csi}}, \code{\link{inchol-class}}}
\examples{
data(iris)

## create multidimensional y matrix
yind <- t(matrix(1:3,3,150))
ymat <- matrix(0, 150, 3)
ymat[yind==as.integer(iris[,5])] <- 1

datamatrix <- as.matrix(iris[,-5])
# initialize kernel function
rbf <- rbfdot(sigma=0.1)
rbf
Z <- csi(datamatrix,ymat, kernel=rbf, rank = 30)
dim(Z)
pivots(Z)
# calculate kernel matrix
K <- crossprod(t(Z))
# difference between approximated and real kernel matrix
(K - kernelMatrix(kernel=rbf, datamatrix))[6,]

}
\keyword{classes}
kernlab/man/rvm-class.Rd0000644000175100001440000001100211304023134014650 0ustar hornikusers\name{rvm-class}
\docType{class}
\alias{rvm-class}
\alias{RVindex}
\alias{mlike}
\alias{nvar}
\alias{RVindex,rvm-method}
\alias{alpha,rvm-method}
\alias{cross,rvm-method}
\alias{error,rvm-method}
\alias{kcall,rvm-method}
\alias{kernelf,rvm-method}
\alias{kpar,rvm-method}
\alias{lev,rvm-method}
\alias{mlike,rvm-method}
\alias{nvar,rvm-method}
\alias{type,rvm-method}
\alias{xmatrix,rvm-method}
\alias{ymatrix,rvm-method}

\title{Class "rvm"}
\description{Relevance Vector Machine Class}
\section{Objects from the Class}{
  Objects can be created by calls of the form \code{new("rvm", ...)}
  or by calling the \code{rvm} function.
}
\section{Slots}{
  \describe{
    \item{\code{tol}:}{Object of class \code{"numeric"} contains the
      tolerance of the termination criterion used.}
    \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains
      the kernel function used }
    \item{\code{kpar}:}{Object of class \code{"list"} contains the
      hyper-parameters used}
    \item{\code{kcall}:}{Object of class \code{"call"} contains the
      function call}
    \item{\code{type}:}{Object of class \code{"character"} contains the
      type of problem}
    \item{\code{terms}:}{Object of class \code{"ANY"} containing the
      terms representation of the symbolic model used (when using a
      formula interface)}
    \item{\code{xmatrix}:}{Object of class \code{"matrix"} contains the
      data matrix used during computation}
    \item{\code{ymatrix}:}{Object of class \code{"output"} contains the
      response matrix}
    \item{\code{fitted}:}{Object of class \code{"output"} with the fitted
      values (predict on the training set).}
    \item{\code{lev}:}{Object of class \code{"vector"} contains the
      levels of the response (in classification)}
    \item{\code{nclass}:}{Object of class \code{"numeric"} contains the
      number of classes (in classification)}
    \item{\code{alpha}:}{Object of class \code{"listI"} containing the
      resulting alpha vector}
    \item{\code{coef}:}{Object of class \code{"ANY"} containing the
      resulting model parameters}
    \item{\code{nvar}:}{Object of class \code{"numeric"} containing the
      calculated variance (in case of regression)}
    \item{\code{mlike}:}{Object of class \code{"numeric"} containing the
      computed maximum likelihood}
    \item{\code{RVindex}:}{Object of class \code{"vector"} containing
      the indexes of the resulting relevance vectors }
    \item{\code{nRV}:}{Object of class \code{"numeric"} containing the
      number of relevance vectors}
    \item{\code{cross}:}{Object of class \code{"numeric"} containing the
      resulting cross validation error }
    \item{\code{error}:}{Object of class \code{"numeric"} containing the
      training error}
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the
      action performed on NAs}
  }
}
\section{Methods}{
  \describe{
    \item{RVindex}{\code{signature(object = "rvm")}: returns the index
      of the relevance vectors }
    \item{alpha}{\code{signature(object = "rvm")}: returns the resulting
      alpha vector}
    \item{cross}{\code{signature(object = "rvm")}: returns the resulting
      cross validation error}
    \item{error}{\code{signature(object = "rvm")}: returns the training
      error }
    \item{fitted}{\code{signature(object = "vm")}: returns the fitted values }
    \item{kcall}{\code{signature(object = "rvm")}: returns the function call }
    \item{kernelf}{\code{signature(object = "rvm")}: returns the kernel
      function used }
    \item{kpar}{\code{signature(object = "rvm")}: returns the parameters
      of the kernel function}
    \item{lev}{\code{signature(object = "rvm")}: returns the levels of
      the response (in classification)}
    \item{mlike}{\code{signature(object = "rvm")}: returns the estimated
      maximum likelihood}
    \item{nvar}{\code{signature(object = "rvm")}: returns the calculated
      variance (in regression)}
    \item{type}{\code{signature(object = "rvm")}: returns the type of problem}
    \item{xmatrix}{\code{signature(object = "rvm")}: returns the data
      matrix used during computation}
    \item{ymatrix}{\code{signature(object = "rvm")}: returns the
      response used }
  }
}
\author{Alexandros Karatzoglou\cr
\email{alexandros.karatzoglou@ci.tuwien.ac.at}}

\seealso{
  \code{\link{rvm}},
  \code{\link{ksvm-class}}
}
\examples{

# create data
x <- seq(-20,20,0.1)
y <- sin(x)/x + rnorm(401,sd=0.05)

# train relevance vector machine
foo <- rvm(x, y)
foo

alpha(foo)
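# editor's addition (not part of the original example): visualize the
# fit and mark the relevance vectors the model selected
plot(x, y, type = "l")
lines(x, fitted(foo), col = "red")
points(x[RVindex(foo)], y[RVindex(foo)], pch = 21, bg = "white")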
RVindex(foo) fitted(foo) kernelf(foo) nvar(foo) ## show slots slotNames(foo) } \keyword{classes} kernlab/DESCRIPTION0000644000175100001440000000252413562454636013435 0ustar hornikusersPackage: kernlab Version: 0.9-29 Title: Kernel-Based Machine Learning Lab Authors@R: c(person("Alexandros", "Karatzoglou", role = c("aut", "cre"), email = "alexandros.karatzoglou@gmail.com"), person("Alex", "Smola", role = "aut"), person("Kurt", "Hornik", role = "aut"), person("National ICT Australia (NICTA)", role = "cph"), person(c("Michael", "A."), "Maniscalco", role = c("ctb", "cph")), person(c("Choon", "Hui"), "Teo", role = "ctb")) Description: Kernel-based machine learning methods for classification, regression, clustering, novelty detection, quantile regression and dimensionality reduction. Among other methods 'kernlab' includes Support Vector Machines, Spectral Clustering, Kernel PCA, Gaussian Processes and a QP solver. Depends: R (>= 2.10) Imports: methods, stats, grDevices, graphics LazyLoad: Yes License: GPL-2 SystemRequirements: C++11 NeedsCompilation: yes Packaged: 2019-11-12 06:36:20 UTC; hornik Author: Alexandros Karatzoglou [aut, cre], Alex Smola [aut], Kurt Hornik [aut], National ICT Australia (NICTA) [cph], Michael A. Maniscalco [ctb, cph], Choon Hui Teo [ctb] Maintainer: Alexandros Karatzoglou Repository: CRAN Date/Publication: 2019-11-12 07:05:02 UTC kernlab/build/0000755000175100001440000000000013562451344013014 5ustar hornikuserskernlab/build/vignette.rds0000644000175100001440000000045513562451344015357 0ustar hornikusersuPMO0-јldɆ݃ZZ,E_Z,RcN;^f޼!"/p鯷!V_(U5NxgA -H8o5%~ui4 #include extern double mymax(double, double); /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *);*/ void dtrqsol(int n, double *x, double *p, double delta, double *sigma) { /* c ********** c c Subroutine dtrqsol c c This subroutine computes the largest (non-negative) solution c of the quadratic trust region equation c c ||x + sigma*p|| = delta. c c The code is only guaranteed to produce a non-negative solution c if ||x|| <= delta, and p != 0. If the trust region equation has c no solution, sigma = 0. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x must contain the vector x. c On exit x is unchanged. c c p is a double precision array of dimension n. c On entry p must contain the vector p. c On exit p is unchanged. c c delta is a double precision variable. c On entry delta specifies the scalar delta. c On exit delta is unchanged. c c sigma is a double precision variable. c On entry sigma need not be specified. c On exit sigma contains the non-negative solution. c c ********** */ int inc = 1; double dsq = delta*delta, ptp, ptx, rad, xtx; ptx = F77_CALL(ddot)(&n, p, &inc, x, &inc); ptp = F77_CALL(ddot)(&n, p, &inc, p, &inc); xtx = F77_CALL(ddot)(&n, x, &inc, x, &inc); /* Guard against abnormal cases. */ rad = ptx*ptx + ptp*(dsq - xtx); rad = sqrt(mymax(rad, 0)); if (ptx > 0) *sigma = (dsq - xtx)/(ptx + rad); else if (rad > 0) *sigma = (rad - ptx)/ptp; else *sigma = 0; } kernlab/src/esa.h0000644000175100001440000001062112234152620013413 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */

// File    : sask/Code/ESA.h
//
// Authors : Choon Hui Teo      (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006

#ifndef ESA_H
#define ESA_H

#include "datatype.h"
#include "errorcode.h"
#include "lcp.h"
#include "ctable.h"
#include "ilcpfactory.h"
#include "isafactory.h"
#include <vector>
#include <iostream>

//#define SLINK

// #define SSARRAY   // does not yet work correctly, CW

class ESA {

 private:
  int _verb;

 public:
  UInt32 size;    //' The length of #text#
  SYMBOL *text;   //' Text corresponds to SA

#ifdef SSARRAY
  int *suftab;    //' Suffix Array
#else
  UInt32 *suftab; //' Suffix Array
#endif

  LCP lcptab;           //' LCP array
  ChildTable childtab;  //' Child table (fields merged)
  UInt32 *suflink;      //' Suffix link table. Two fields: l,r

  //' --- for bucket table ---
  UInt32 bcktab_depth;  //' Number of char defining each bucket
  UInt32 bcktab_size;   //' size of bucket table
  UInt32 *bcktab_val;   //' value column of bucket table
  UInt32 *bcktab_key4;  //' 4-bytes key column of Bucket table
  UInt32 *coef4;
  UInt32 hash_value4;

  UInt64 *bcktab_key8;  //' 8-bytes key column of Bucket table
  UInt64 *coef8;
  UInt64 hash_value8;
  //' ---

  /// Constructors
  ESA(const UInt32 & size_, SYMBOL *text_, int verb=INFO);

  /// Destructor
  virtual ~ESA();

  /// Construct child table
  ErrorCode ConstructChildTable();

  /// Get suffix link interval
  ErrorCode GetSuflink(const UInt32 &i, const UInt32 &j,
                       UInt32 &sl_i, UInt32 &sl_j);

  /// Find the suffix link
  ErrorCode FindSuflink(const UInt32 &parent_i, const UInt32 &parent_j,
                        const UInt32 &child_i, const UInt32 &child_j,
                        UInt32 &sl_i, UInt32 &sl_j);

  /// Construct suffix link table
  ErrorCode ConstructSuflink();

  /// Construct bucket table
  ErrorCode ConstructBcktab(const UInt32 &alphabet_size=256);

  /// Get all non-singleton child-intervals
  ErrorCode GetChildIntervals(const UInt32 &lb, const UInt32 &rb,
                              std::vector<std::pair<UInt32, UInt32> > &q);

  /// Get intervals by index
  ErrorCode GetIntervalByIndex(const UInt32 &parent_i, const UInt32 &parent_j,
                               const UInt32 &start_idx, UInt32 &child_i,
                               UInt32 &child_j);

  /// Get intervals by character
  ErrorCode GetIntervalByChar(const UInt32 &parent_i, const UInt32 &parent_j,
                              const SYMBOL &start_ch, const UInt32 &depth,
                              UInt32 &child_i, UInt32 &child_j);

  /// Get lcp value
  ErrorCode GetLcp(const UInt32 &i, const UInt32 &j, UInt32 &val);

  /// Compare pattern to text[suftab[idx]..length].
  ErrorCode Compare(const UInt32 &idx, const UInt32 &depth, SYMBOL *pattern,
                    const UInt32 &p_len, UInt32 &matched_len);

  /// Find longest substring of pattern in enhanced suffix array.
  ErrorCode Match(const UInt32 &i, const UInt32 &j, SYMBOL *pattern,
                  const UInt32 p_len, UInt32 &lb, UInt32 &rb,
                  UInt32 &matched_len);

  /// Similar to Match() but returns also floor interval of [lb..rb]
  ErrorCode ExactSuffixMatch(const UInt32 &i, const UInt32 &j,
                             const UInt32 &offset, SYMBOL *pattern,
                             const UInt32 p_len, UInt32 &lb, UInt32 &rb,
                             UInt32 &matched_len, UInt32 &floor_lb,
                             UInt32 &floor_rb, UInt32 &floor_len);

};
#endif
kernlab/src/stack.h0000644000175100001440000000623313333062601013754 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2).
 *
 * The Initial Developer of the Original Code is
 * Michael A. Maniscalco
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Michael A. Maniscalco
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef MSUFSORT_STACK_H
#define MSUFSORT_STACK_H

//=============================================================================================
// A quick and dirty stack class for use with the MSufSort algorithm
//
// Author: M.A. Maniscalco
// Date: 7/30/04
// email: michael@www.michael-maniscalco.com
//
//=============================================================================================

#include "memory.h"

template <class T>
class Stack
{
public:
    Stack(unsigned int initialSize, unsigned int maxExpandSize, bool preAllocate = false):
        m_initialSize(initialSize), m_maxExpandSize(maxExpandSize), m_preAllocate(preAllocate)
    {
        Initialize();
    }

    virtual ~Stack(){SetSize(0);}

    void Push(T value);

    T & Pop();

    T & Top();

    void SetSize(unsigned int stackSize);

    void Initialize();

    unsigned int Count();

    void Clear();

    T * m_stack;

    T * m_stackPtr;

    T * m_endOfStack;

    unsigned int m_stackSize;

    unsigned int m_initialSize;

    unsigned int m_maxExpandSize;

    bool m_preAllocate;
};

template <class T>
inline void Stack<T>::Clear()
{
    m_stackPtr = m_stack;
}

template <class T>
inline unsigned int Stack<T>::Count()
{
    return (unsigned int)(m_stackPtr - m_stack);
}

template <class T>
inline void Stack<T>::Initialize()
{
    m_stack = m_endOfStack = m_stackPtr = 0;
    m_stackSize = 0;
    if (m_preAllocate)
        SetSize(m_initialSize);
}

template <class T>
inline void Stack<T>::Push(T value)
{
    if (m_stackPtr >= m_endOfStack)
    {
        unsigned int newSize = (m_stackSize < m_maxExpandSize) ?
                               m_stackSize + m_maxExpandSize : (m_stackSize << 1);
        SetSize(newSize);
    }
    *(m_stackPtr++) = value;
}

template <class T>
inline T & Stack<T>::Pop()
{
    return *(--m_stackPtr);
}

template <class T>
inline T & Stack<T>::Top()
{
    return *(m_stackPtr - 1);
}

template <class T>
inline void Stack<T>::SetSize(unsigned int stackSize)
{
    if (m_stackSize == stackSize)
        return;

    T * newStack = 0;
    if (stackSize)
    {
        newStack = new T[stackSize];
        unsigned int bytesToCopy = (unsigned int)(m_stackPtr - m_stack) * (unsigned int)sizeof(T);
        if (bytesToCopy)
            memcpy((void *)newStack, m_stack, bytesToCopy);
        m_stackPtr = &newStack[m_stackPtr - m_stack];
        m_endOfStack = &newStack[stackSize];
        m_stackSize = stackSize;
    }

    if (m_stack)
        delete [] m_stack;
    m_stack = newStack;
}
#endif
kernlab/src/errorcode.h0000644000175100001440000000374312234152620014636 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */

// File    : sask/Code/ErrorCode.cpp
//
// Authors : Choon Hui Teo      (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006

#ifndef _ERRORCODE_H_
#define _ERRORCODE_H_

#include "datatype.h"
#include <cstdlib>

// Verbosity level
enum verbosity {QUIET, INFO, DEBUG1};

#define ErrorCode  UInt32

/**
 * for general use
 */
#define NOERROR           0
#define GENERAL_ERROR     1
#define MEM_ALLOC_FAILED  2
#define INVALID_PARAM     3
#define ARRAY_EMPTY       4
#define OPERATION_FAILED  5

/**
 * SuffixArray
 */
#define MATCH_NOT_FOUND   101
#define PARTIAL_MATCH     102

/**
 * LCP
 */
#define LCP_COMPACT_FAILED 201

#define CHECKERROR(i) { \
  if((i) != NOERROR) { \
    exit(EXIT_FAILURE); \
  } \
}

// #define MESSAGE(msg) { std::cout<<(msg)<
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */

// File    : sask/Code/LCP.h
//
// Authors : Choon Hui Teo      (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006
//           11 Oct 2006

#ifndef LCP_H
#define LCP_H

#include "datatype.h"
#include "errorcode.h"
#include <iostream>
#include <vector>
#include <algorithm>
#include <functional>
#include <cassert>

/**
 *  LCP array class
 */
class LCP {

 private:
  /// Compacted array
  /* std::vector<Byte1>  _p_array; */
  /* std::vector<UInt32> _idx_array; */
  /* std::vector<UInt32> _val_array; */
  Byte1  *_p_array;
  UInt32 *_idx_array;
  UInt32 *_val_array;
  UInt32 _size;

  bool _is_compact;

  UInt32 *_beg;
  UInt32 *_end;
  UInt32 *_cache;

  /* typedef std::vector<UInt32>::const_iterator const_itr; */
  /* const_itr _beg; */
  /* const_itr _end; */
  /* const_itr _cache; */

  UInt32 _dist;

 public:

  /// Original array - 4bytes
  // std::vector<UInt32> array;
  UInt32 *array;

  /// Constructors
  LCP(const UInt32 &size);

  /// Destructors
  virtual ~LCP();

  /// Methods

  /// Compact 4n bytes array into (1n+8p) bytes arrays
  ErrorCode compact(void);

  /// Retrieve lcp array value
  // ErrorCode
lcp(const UInt32 &idx, UInt32 &value); UInt32 operator[] (const UInt32& idx); friend std::ostream& operator << (std::ostream& os, LCP& lcp); }; #endif kernlab/src/dspcg.c0000644000175100001440000001617211304023134013737 0ustar hornikusers#include #include extern void *xmalloc(size_t); extern double mymin(double, double); extern double mymax(double, double); /* LEVEL 1 BLAS */ /*extern double dnrm2_(int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /*extern void dtrsv_(char *, char *, char *, int *, double *, int *, double *, int *);*/ /* MINPACK 2 */ extern void dprsrch(int, double *, double *, double *, double *, double *, double *); extern double dprecond(int, double *, double *); extern void dtrpcg(int, double*, double *, double, double *, double, double, double *, int *, int *); void dspcg(int n, double *x, double *xl, double *xu, double *A, double *g, double delta, double rtol, double *s, int *info) { /* c ********* c c Subroutine dspcg c c This subroutine generates a sequence of approximate minimizers c for the subproblem c c min { q(x) : xl <= x <= xu }. c c The quadratic is defined by c c q(x[0]+s) = 0.5*s'*A*s + g'*s, c c where x[0] is a base point provided by the user, A is a symmetric c positive semidefinite dense matrix, and g is a vector. c c At each stage we have an approximate minimizer x[k], and generate c a direction p[k] by solving the subproblem c c min { q(x[k]+p) : || p || <= delta, s(fixed) = 0 }, c c where fixed is the set of variables fixed at x[k], delta is the c trust region bound. c c B = A(free:free), c c where free is the set of free variables at x[k]. Given p[k], c the next minimizer x[k+1] is generated by a projected search. c c The starting point for this subroutine is x[1] = x[0] + s, where c x[0] is a base point and s is the Cauchy step. c c The subroutine converges when the step s satisfies c c || (g + A*s)[free] || <= rtol*|| g[free] || c c In this case the final x is an approximate minimizer in the c face defined by the free variables. c c The subroutine terminates when the trust region bound does c not allow further progress, that is, || L'*p[k] || = delta. c In this case the final x satisfies q(x) < q(x[k]). c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is the final minimizer. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g must contain the vector g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c rtol is a double precision variable. c On entry rtol specifies the accuracy of the final minimizer. c On exit rtol is unchanged. c c s is a double precision array of dimension n. c On entry s is the Cauchy step. c On exit s contain the final step. c c info is an integer variable. c On entry info need not be specified. c On exit info is set as follows: c c info = 1 Convergence. 
The final step s satisfies c || (g + A*s)[free] || <= rtol*|| g[free] ||, c and the final x is an approximate minimizer c in the face defined by the free variables. c c info = 2 Termination. The trust region bound does c not allow further progress. */ int i, j, nfaces, nfree, inc = 1, infotr, iters = 0, itertr; double gfnorm, gfnormf, stol = 1e-16, alpha; double one = 1, zero = 0; double *B = (double *) xmalloc(sizeof(double)*n*n); double *L = (double *) xmalloc(sizeof(double)*n*n); double *w = (double *) xmalloc(sizeof(double)*n); double *wa = (double *) xmalloc(sizeof(double)*n); double *wxl = (double *) xmalloc(sizeof(double)*n); double *wxu = (double *) xmalloc(sizeof(double)*n); int *indfree = (int *) xmalloc(sizeof(int)*n); double *gfree = (double *) xmalloc(sizeof(double)*n); /* Compute A*(x[1] - x[0]) and store in w. */ F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, w, &inc); /* Compute the Cauchy point. */ for (j=0;j * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_WeightFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef I_WEIGHTFACTORY_H #define I_WEIGHTFACTORY_H #include "datatype.h" #include "errorcode.h" /// Weight Factory interface for string kernel class I_WeightFactory { public: /// Constructor I_WeightFactory(){} /// Destructor virtual ~I_WeightFactory(){} /// Compute edge weight between floor interval and the end of matched substring. virtual ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) = 0; }; #endif kernlab/src/isafactory.h0000644000175100001440000000306412234152620015012 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_SAFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 //' Interface for Enhanced Suffix Array construction algorithms #ifndef I_SAFACTORY_H #define I_SAFACTORY_H #include "datatype.h" #include "errorcode.h" class I_SAFactory { public: ///Constructor I_SAFactory(){} ///Destructor virtual ~I_SAFactory(){} ///Methods virtual ErrorCode ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array) = 0; }; #endif kernlab/src/dtrpcg.c0000644000175100001440000001515112134020542014120 0ustar hornikusers#include #include #include #include extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /* extern int daxpy_(int *, double *, double *, int *, double *, int *); */ /* extern double ddot_(int *, double *, int *, double *, int *); */ /* extern double dnrm2_(int *, double *, int *); */ /* extern int dscal_(int *, double *, double *, int *); */ /* LEVEL 2 BLAS */ /* extern int dtrsv_(char *, char *, char *, int *, double *, int *, double *, int *); */ /* extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); */ /* MINPACK 2 */ extern void dtrqsol(int, double *, double *, double , double *); void dtrpcg(int n, double *A, double *g, double delta, double *L, double tol, double stol, double *w, int *iters, int *info) { /* c ********* c c Subroutine dtrpcg c c Given a dense symmetric positive semidefinite matrix A, this c subroutine uses a preconditioned conjugate gradient method to find c an approximate minimizer of the trust region subproblem c c min { q(s) : || L'*s || <= delta }. c c where q is the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c This subroutine generates the conjugate gradient iterates for c the equivalent problem c c min { Q(w) : || w || <= delta }. c c where Q is the quadratic defined by c c Q(w) = q(s), w = L'*s. c c Termination occurs if the conjugate gradient iterates leave c the trust region, a negative curvature direction is generated, c or one of the following two convergence tests is satisfied. c c Convergence in the original variables: c c || grad q(s) || <= tol c c Convergence in the scaled variables: c c || grad Q(w) || <= stol c c Note that if w = L'*s, then L*grad Q(w) = grad q(s). c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g must contain the vector g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c L is a double precision array of dimension n*n. c On entry L need not to be specified. c On exit the lower triangular part of L contains the matrix L. c c tol is a double precision variable. c On entry tol specifies the convergence test c in the un-scaled variables. c On exit tol is unchanged c c stol is a double precision variable. c On entry stol specifies the convergence test c in the scaled variables. c On exit stol is unchanged c c w is a double precision array of dimension n. c On entry w need not be specified. c On exit w contains the final conjugate gradient iterate. c c iters is an integer variable. c On entry iters need not be specified. 
c On exit iters is set to the number of conjugate c gradient iterations. c c info is an integer variable. c On entry info need not be specified. c On exit info is set as follows: c c info = 1 Convergence in the original variables. c || grad q(s) || <= tol c c info = 2 Convergence in the scaled variables. c || grad Q(w) || <= stol c c info = 3 Negative curvature direction generated. c In this case || w || = delta and a direction c c of negative curvature w can be recovered by c solving L'*w = p. c c info = 4 Conjugate gradient iterates exit the c trust region. In this case || w || = delta. c c info = 5 Failure to converge within itermax(n) iterations. c c ********** */ int i, inc = 1; double one = 1, zero = 0, alpha, malpha, beta, ptq, rho; double *p, *q, *t, *r, *z, sigma, rtr, rnorm, rnorm0, tnorm; p = (double *) xmalloc(sizeof(double)*n); q = (double *) xmalloc(sizeof(double)*n); t = (double *) xmalloc(sizeof(double)*n); r = (double *) xmalloc(sizeof(double)*n); z = (double *) xmalloc(sizeof(double)*n); /* Initialize the iterate w and the residual r. Initialize the residual t of grad q to -g. Initialize the residual r of grad Q by solving L*r = -g. Note that t = L*r. */ for (i=0;i 0) alpha = rho/ptq; else alpha = 0; dtrqsol(n, w, p, delta, &sigma); /* Exit if there is negative curvature or if the iterates exit the trust region. */ if (ptq <= 0 || alpha >= sigma) { F77_CALL(daxpy)(&n, &sigma, p, &inc, w, &inc); if (ptq <= 0) *info = 3; else *info = 4; goto return0; } /* Update w and the residuals r and t. Note that t = L*r. */ malpha = -alpha; F77_CALL(daxpy)(&n, &alpha, p, &inc, w, &inc); F77_CALL(daxpy)(&n, &malpha, q, &inc, r, &inc); F77_CALL(daxpy)(&n, &malpha, z, &inc, t,&inc); /* Exit if the residual convergence test is satisfied. */ rtr = F77_CALL(ddot)(&n, r, &inc, r, &inc); rnorm = sqrt(rtr); tnorm = sqrt(F77_CALL(ddot)(&n, t, &inc, t, &inc)); if (tnorm <= tol) { *info = 1; goto return0; } if (rnorm <= stol) { *info = 2; goto return0; } /* Compute p = r + beta*p and update rho. */ beta = rtr/rho; F77_CALL(dscal)(&n, &beta, p, &inc); F77_CALL(daxpy)(&n, &one, r, &inc, p, &inc); rho = rtr; } /* iters > itermax = n */ *info = 5; return0: free(p); free(q); free(r); free(t); free(z); } kernlab/src/inductionsort.h0000644000175100001440000000554312234152620015556 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. 
Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef MSUFSORT_INDUCTION_SORTING_H #define MSUFSORT_INDUCTION_SORTING_H #include "introsort.h" class InductionSortObject { public: InductionSortObject(unsigned int inductionPosition = 0, unsigned int inductionValue = 0, unsigned int suffixIndex = 0); bool operator <= (InductionSortObject & object); bool operator == (InductionSortObject & object); InductionSortObject& operator = (InductionSortObject & object); bool operator >= (InductionSortObject & object); bool operator > (InductionSortObject & object); bool operator < (InductionSortObject & object); unsigned int m_sortValue[2]; }; inline bool InductionSortObject::operator <= (InductionSortObject & object) { if (m_sortValue[0] < object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] <= object.m_sortValue[1]); return false; } inline bool InductionSortObject::operator == (InductionSortObject & object) { return ((m_sortValue[0] == object.m_sortValue[0]) && (m_sortValue[1] == object.m_sortValue[1])); } inline bool InductionSortObject::operator >= (InductionSortObject & object) { if (m_sortValue[0] > object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] >= object.m_sortValue[1]); return false; } inline InductionSortObject & InductionSortObject::operator = (InductionSortObject & object) { m_sortValue[0] = object.m_sortValue[0]; m_sortValue[1] = object.m_sortValue[1]; return *this; } inline bool InductionSortObject::operator > (InductionSortObject & object) { if (m_sortValue[0] > object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] > object.m_sortValue[1]); return false; } inline bool InductionSortObject::operator < (InductionSortObject & object) { if (m_sortValue[0] < object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] < object.m_sortValue[1]); return false; } #endif kernlab/src/lcp.cpp0000644000175100001440000001271513561512465013775 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/LCP.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef LCP_CPP #define LCP_CPP #include "lcp.h" // Threshold for compacting LCP[] const Real THRESHOLD = 0.3; LCP::LCP(const UInt32 &size): _p_array(0), _idx_array(0), _val_array(0), _size(size), _is_compact(false), _beg(0), _end(0), _cache(0), _dist(0), array(new UInt32[size]){ } LCP::~LCP() { if(array) {delete [] array; array = 0;} if(_p_array) {delete [] _p_array; _p_array = 0;} if(_idx_array) {delete [] _idx_array; _idx_array = 0;} if(_val_array) {delete [] _val_array; _val_array = 0;} } /** * Compact initial/original lcp array of n elements (i.e. 4n bytes) * into a n byte array with 8 bytes of secondary storage. * */ ErrorCode LCP::compact(void){ // Validate pre-conditions //assert(!array.empty() && array.size() == _size); assert(array); // Already compact. Nothing to do if (_is_compact) return NOERROR; // Count number of lcp-values >= 255. // UInt32 idx_len = std::count_if(array.begin(), array.end(), // std::bind2nd(std::greater(),254)); #ifdef _RWSTD_NO_CLASS_PARTIAL_SPEC UInt32 idx_len = 0; std::count_if(array, array + _size, std::bind2nd(std::greater(),254), idx_len); #else UInt32 idx_len = std::count_if(array, array + _size, std::bind(std::greater(), std::placeholders::_1, 254)); #endif // Compact iff idx_len/|array| > THRESHOLD if((Real)idx_len/_size > THRESHOLD) { //std::cout<< "Not compacting " << std::endl; return NOERROR; } // std::cout<< "Compacting with : " << idx_len << std::endl; // We know how much space to use // _p_array.resize(_size); // _idx_array.resize(idx_len); // _val_array.resize(idx_len); _p_array = new Byte1[_size]; _idx_array = new UInt32[idx_len]; _val_array = new UInt32[idx_len]; // Hold pointers for later. Avoids function calls // _beg = _idx_array.begin(); // _end = _idx_array.end(); // _cache = _idx_array.begin(); _beg = _idx_array; _end = _idx_array + idx_len; _cache = _idx_array; _dist = 0; for(UInt32 i=0, j=0; i<_size; i++) { if(array[i] < 255){ _p_array[i] = array[i]; }else { _p_array[i] = 255; _idx_array[j] = i; _val_array[j] = array[i]; j++; } } //array.resize(0); // array.clear(); delete [] array; array = 0; _is_compact = true; return NOERROR; } /** * Retrieve lcp array values. * * \param idx - (IN) Index of lcp array */ UInt32 LCP::operator [] (const UInt32 &idx) { // input is valid? // assert (idx >= 0 && idx < _size); if(!_is_compact){ // LCP array has not been compacted yet! return array[idx]; } if(_p_array[idx] < 255){ // Found in primary index return (UInt32) _p_array[idx]; } // svnvish: BUGBUG // Do some caching here. 
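// The caching below exploits locality of reference: lcp[] is typically
// scanned sequentially, so the last hit (_cache/_dist) is advanced by
// one and checked first; only on a miss do we fall back to a binary
// search (std::lower_bound) over the secondary index.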
// // Now search in secondary index as last resort // std::pair< const_itr, const_itr > p = equal_range(_beg, _end, idx); // return _val_array[std::distance(_beg, p.first)]; if (++_cache == _end){ _cache = _beg; _dist = 0; }else{ _dist++; } UInt32 c_idx = *(_cache); if (c_idx == idx){ return _val_array[_dist]; } // _cache = std::equal_range(_beg, _end, idx).first; _cache = std::lower_bound(_beg, _end, idx); #ifdef _RWSTD_NO_CLASS_PARTIAL_SPEC _dist = 0; std::distance(_beg, _cache, _dist); #else _dist = std::distance(_beg, _cache); #endif //std::cout << "here" << std::endl; // _cache = equal_range(_beg, _end, idx).first; // _dist = std::distance(_beg, _cache); return _val_array[_dist]; // if (c_idx > idx){ // _cache = equal_range(_beg, _cache, idx).first; // }else{ // _cache = equal_range(_cache, _end, idx).first; // } // //_cache = p.first; // _dist = std::distance(_beg, _cache); // return _val_array[_dist]; } /** * Dump array elements to output stream. * * \param os - (IN) Output stream * \param lcp - (IN) LCP object. */ std::ostream& operator << (std::ostream& os, LCP& lcp){ for( UInt32 i = 0; i < lcp._size; i++ ){ os << "lcp[ " << i << "]: " << lcp[i] << std::endl; } return os; } #endif kernlab/src/cweight.cpp0000644000175100001440000000427412234152620014637 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ConstantWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 12 Oct 2006 #ifndef CWEIGHT_CPP #define CWEIGHT_CPP #include "cweight.h" #include /** * Constant weight function. Computes number of common substrings. Every * matched substring is of same weight (i.e. 1) * W(y,t) := tau - gamma * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode ConstantWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. weight = (x_len - floor_len); // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " weight : " << weight << std::endl; return NOERROR; } #endif kernlab/src/expdecayweight.cpp0000644000175100001440000000557312234152620016222 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ExpDecayWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef EXPDECAYWEIGHT_CPP #define EXPDECAYWEIGHT_CPP #include #include #include "expdecayweight.h" using namespace std; /** * Exponential Decay weight function. * W(y,t) := (lambda^{-gamma} - lambda^{-tau}) / (lambda - 1) * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode ExpDecayWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) // ErrorCode // ExpDecayWeight::ComputeWeight(const Real &floor_len, const Real &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. if(floor_len == x_len) { //' substring ended on an interval, so, get the val from val[] weight = 0.0; } else { //weight = (pow(-(floor_len-1), lambda) - pow(-x_len, lambda)) / (1-lambda); //weight = (pow(lambda,((Real)floor_len)) - pow(lambda, (Real)x_len+1)) / (1-lambda); // double a=floor_len*-1.0; // double b=x_len*-1.0; // weight = (pow(lambda,a) - pow(lambda, b)) / (lambda-1); weight = (pow(lambda,Real(-1.0*floor_len)) - pow(lambda, Real(-1.0*x_len))) / (lambda-1); } // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " pow1 : " << pow(lambda,-((Real)floor_len)) // << " pow2 : " << pow(lambda,-(Real)x_len) // << " weight : " << weight << std::endl; return NOERROR; } #endif kernlab/src/wkasailcp.h0000644000175100001440000000337712234152620014633 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_kasai_lcp.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef W_KASAI_LCP_H #define W_KASAI_LCP_H #include "datatype.h" #include "errorcode.h" #include "ilcpfactory.h" #include "lcp.h" /** * Kasai et al's LCP array computation algorithm is * is slightly faster than Manzini's algorithm. However, * it needs inverse suffix array which costs extra memory. */ class W_kasai_lcp : public I_LCPFactory { public: /// Constructor W_kasai_lcp(){} /// Desctructor virtual ~W_kasai_lcp(){} /// Compute LCP array. ErrorCode ComputeLCP(const SYMBOL *text, const UInt32 &len, const UInt32 *sa, LCP& lcp); }; #endif kernlab/src/init.c0000644000175100001440000000234413271617375013623 0ustar hornikusers#include #include #include // for NULL #include /* .Call calls */ extern SEXP fullsubstringk(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP smo_optim(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP stringtv(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP subsequencek(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP substringk(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP tron_optim(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); static const R_CallMethodDef CallEntries[] = { {"fullsubstringk", (DL_FUNC) &fullsubstringk, 6}, {"smo_optim", (DL_FUNC) &smo_optim, 23}, {"stringtv", (DL_FUNC) &stringtv, 7}, {"subsequencek", (DL_FUNC) &subsequencek, 6}, {"substringk", (DL_FUNC) &substringk, 6}, {"tron_optim", (DL_FUNC) &tron_optim, 27}, {NULL, NULL, 0} }; void R_init_kernlab(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } kernlab/src/ilcpfactory.h0000644000175100001440000000304512234152620015164 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_LCPFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef ILCPFACTORY_H #define ILCPFACTORY_H #include "datatype.h" #include "errorcode.h" #include "lcp.h" class I_LCPFactory { public: /// Constructor I_LCPFactory(){} /// Destructor virtual ~I_LCPFactory(){} /// Methods virtual ErrorCode ComputeLCP(const SYMBOL *text, const UInt32 &length, const UInt32 *sa, LCP& lcp) = 0; }; #endif kernlab/src/dgpnrm.c0000644000175100001440000000217211304023134014121 0ustar hornikusers#include double dgpnrm(int n, double *x, double *xl, double *xu, double *g) { /* c ********** c c Function dgpnrm c c This function computes the infinite norm of the c projected gradient at x. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the gradient g. c On exit g is unchanged. c c ********** */ int i; double norm = 0; for (i=0;i= 0 && x[i] == xl[i]))) if (fabs(g[i]) > norm) norm = fabs(g[i]); return norm; } kernlab/src/dbreakpt.c0000644000175100001440000000417111304023134014427 0ustar hornikusersextern double mymin(double, double); extern double mymax(double, double); void dbreakpt(int n, double *x, double *xl, double *xu, double *w, int *nbrpt, double *brptmin, double *brptmax) { /* c ********** c c Subroutine dbreakpt c c This subroutine computes the number of break-points, and c the minimal and maximal break-points of the projection of c x + alpha*w on the n-dimensional interval [xl,xu]. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c w is a double precision array of dimension n. c On entry w specifies the vector w. c On exit w is unchanged. c c nbrpt is an integer variable. c On entry nbrpt need not be specified. c On exit nbrpt is the number of break points. c c brptmin is a double precision variable c On entry brptmin need not be specified. c On exit brptmin is minimal break-point. c c brptmax is a double precision variable c On entry brptmax need not be specified. c On exit brptmax is maximal break-point. 
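c
c     Example: for n = 1 with x = 0.5, xl = 0, xu = 1, and w = 1, the
c     projection of x + alpha*w leaves the interval at
c     alpha = (xu - x)/w = 0.5, so on exit nbrpt = 1 and
c     brptmin = brptmax = 0.5.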
c c ********** */ int i; double brpt; *nbrpt = 0; for (i=0;i 0) { (*nbrpt)++; brpt = (xu[i] - x[i])/w[i]; if (*nbrpt == 1) *brptmin = *brptmax = brpt; else { *brptmin = mymin(brpt, *brptmin); *brptmax = mymax(brpt, *brptmax); } } else if (x[i] > xl[i] && w[i] < 0) { (*nbrpt)++; brpt = (xl[i] - x[i])/w[i]; if (*nbrpt == 1) *brptmin = *brptmax = brpt; else { *brptmin = mymin(brpt, *brptmin); *brptmax = mymax(brpt, *brptmax); } } if (*nbrpt == 0) *brptmin = *brptmax = 0; } kernlab/src/dcauchy.c0000644000175100001440000001161213561526540014271 0ustar hornikusers#include #include extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /* extern double ddot_(int *, double *, int *, double *, int *); extern double dnrm2_(int *, double *, int *); */ /* LEVEL 2 BLAS */ /* extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); */ /* MINPACK 2 */ extern void dbreakpt(int, double *, double *, double *, double *, int *, double *, double *); extern void dgpstep(int, double *, double *, double *, double, double *, double *); void dcauchy(int n, double *x, double *xl, double *xu, double *A, double *g, double delta, double *alpha, double *s, double *wa) { /* c ********** c c Subroutine dcauchy c c This subroutine computes a Cauchy step that satisfies a trust c region constraint and a sufficient decrease condition. c c The Cauchy step is computed for the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c where A is a symmetric matrix , and g is a vector. Given a c parameter alpha, the Cauchy step is c c s[alpha] = P[x - alpha*g] - x, c c with P the projection onto the n-dimensional interval [xl,xu]. c The Cauchy step satisfies the trust region constraint and the c sufficient decrease condition c c || s || <= delta, q(s) <= mu_0*(g'*s), c c where mu_0 is a constant in (0,1). c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the gradient g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c alpha is a double precision variable. c On entry alpha is the current estimate of the step. c On exit alpha defines the Cauchy step s[alpha]. c c s is a double precision array of dimension n. c On entry s need not be specified. c On exit s is the Cauchy step s[alpha]. c c ********** */ double one = 1, zero = 0; /* Constant that defines sufficient decrease. Interpolation and extrapolation factors. */ double mu0 = 0.01, interpf = 0.1, extrapf = 10; int search, interp, nbrpt, nsteps = 1, i, inc = 1; double alphas, brptmax, brptmin, gts, q; /* FIXME KH 2019-11-09: double *wa = (double *) xmalloc(sizeof(double)*n); */ /* Find the minimal and maximal break-point on x - alpha*g. */ for (i=0;i delta) interp = 1; else { F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc); gts = F77_CALL(ddot)(&n, g, &inc, s, &inc); q = 0.5*F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts; interp = q >= mu0*gts ? 
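/* interpolate when the sufficient-decrease test q(s) <= mu0*(g'*s) fails */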
1 : 0; } /* Either interpolate or extrapolate to find a successful step. */ if (interp) { /* Reduce alpha until a successful step is found. */ search = 1; while (search) { /* This is a crude interpolation procedure that will be replaced in future versions of the code. */ nsteps++; (*alpha) *= interpf; dgpstep(n, x, xl, xu, -(*alpha), g, s); if (F77_CALL(dnrm2)(&n, s, &inc) <= delta) { F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc); gts = F77_CALL(ddot)(&n, g, &inc, s, &inc); q = 0.5 * F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts; search = q > mu0*gts ? 1 : 0; } } } else { search = 1; alphas = *alpha; /* Increase alpha until a successful step is found. */ while (search && (*alpha) <= brptmax) { /* This is a crude extrapolation procedure that will be replaced in future versions of the code. */ nsteps++; alphas = *alpha; (*alpha) *= extrapf; dgpstep(n, x, xl, xu, -(*alpha), g, s); if (F77_CALL(dnrm2)(&n, s, &inc) <= delta) { F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc); gts = F77_CALL(ddot)(&n, g, &inc, s, &inc); q = 0.5 * F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts; search = q < mu0*gts ? 1 : 0; } else search = 0; } *alpha = alphas; dgpstep(n, x, xl, xu, -(*alpha), g, s); } /* FIXME KH 2019-11-09: free(wa); */ } kernlab/src/svm.cpp0000644000175100001440000025315612652176277014041 0ustar hornikusers#include #include #include #include #include #include #include #include #include #include #include "svm.h" typedef float Qfloat; typedef signed char schar; #ifndef min template inline T min(T x,T y) { return (x inline T max(T x,T y) { return (x>y)?x:y; } #endif template inline void swap(T& x, T& y) { T t=x; x=y; y=t; } template inline void clone(T*& dst, S* src, int n) { dst = new T[n]; memcpy((void *)dst,(void *)src,sizeof(T)*n); } inline double powi(double base, int times) { double tmp = base, ret = 1.0; for(int t=times; t>0; t/=2) { if(t%2==1) ret*=tmp; tmp = tmp * tmp; } return ret; } #define INF HUGE_VAL # define TAU 1e-12 #define Malloc(type,n) (type *)malloc((n)*sizeof(type)) #if 0 void info(char *fmt,...) { va_list ap; va_start(ap,fmt); //vprintf(fmt,ap); va_end(ap); } void info_flush() { fflush(stdout); } #else void info(char *fmt,...) 
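/* with verbose output compiled out, info() and info_flush() are no-ops */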
{} void info_flush() {} #endif // // Kernel Cache // // l is the number of total data items // size is the cache size limit in bytes // class Cache { public: Cache(int l,long int size, int qpsize); ~Cache(); // request data [0,len) // return some position p where [p,len) need to be filled // (p >= len if nothing needs to be filled) int get_data(const int index, Qfloat **data, int len); void swap_index(int i, int j); // future_option private: int l; long int size; struct head_t { head_t *prev, *next; // a cicular list Qfloat *data; int len; // data[0,len) is cached in this entry }; head_t *head; head_t lru_head; void lru_delete(head_t *h); void lru_insert(head_t *h); }; Cache::Cache(int l_,long int size_,int qpsize):l(l_),size(size_) { head = (head_t *)calloc(l,sizeof(head_t)); // initialized to 0 size /= sizeof(Qfloat); size -= l * sizeof(head_t) / sizeof(Qfloat); size = max(size, (long int) qpsize*l); // cache must be large enough for 'qpsize' columns lru_head.next = lru_head.prev = &lru_head; } Cache::~Cache() { for(head_t *h = lru_head.next; h != &lru_head; h=h->next) free(h->data); free(head); } void Cache::lru_delete(head_t *h) { // delete from current location h->prev->next = h->next; h->next->prev = h->prev; } void Cache::lru_insert(head_t *h) { // insert to last position h->next = &lru_head; h->prev = lru_head.prev; h->prev->next = h; h->next->prev = h; } int Cache::get_data(const int index, Qfloat **data, int len) { head_t *h = &head[index]; if(h->len) lru_delete(h); int more = len - h->len; if(more > 0) { // free old space while(size < more) { head_t *old = lru_head.next; lru_delete(old); free(old->data); size += old->len; old->data = 0; old->len = 0; } // allocate new space h->data = (Qfloat *)realloc(h->data,sizeof(Qfloat)*len); size -= more; swap(h->len,len); } lru_insert(h); *data = h->data; return len; } void Cache::swap_index(int i, int j) { if(i==j) return; if(head[i].len) lru_delete(&head[i]); if(head[j].len) lru_delete(&head[j]); swap(head[i].data,head[j].data); swap(head[i].len,head[j].len); if(head[i].len) lru_insert(&head[i]); if(head[j].len) lru_insert(&head[j]); if(i>j) swap(i,j); for(head_t *h = lru_head.next; h!=&lru_head; h=h->next) { if(h->len > i) { if(h->len > j) swap(h->data[i],h->data[j]); else { // give up lru_delete(h); free(h->data); size += h->len; h->data = 0; h->len = 0; } } } } // // Kernel evaluation // // the static method k_function is for doing single kernel evaluation // the constructor of Kernel prepares to calculate the l*l kernel matrix // the member function get_Q is for getting one column from the Q Matrix // class QMatrix { public: virtual Qfloat *get_Q(int column, int len) const = 0; virtual double *get_QD() const = 0; virtual void swap_index(int i, int j) const = 0; virtual ~QMatrix() {} }; class Kernel: public QMatrix{ public: Kernel(int l, svm_node * const * x, const svm_parameter& param); virtual ~Kernel(); static double k_function(const svm_node *x, const svm_node *y, const svm_parameter& param); virtual Qfloat *get_Q(int column, int len) const = 0; virtual double *get_QD() const = 0; virtual void swap_index(int i, int j) const // no so const... 
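// keeps the data pointers and cached squared norms consistent when the cache reorders columns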
{ swap(x[i],x[j]); if(x_square) swap(x_square[i],x_square[j]); } protected: double (Kernel::*kernel_function)(int i, int j) const; private: const svm_node **x; double *x_square; // svm_parameter const int kernel_type; const int degree; const double gamma; const double coef0; const double lim; const double *K; const int m; static double dot(const svm_node *px, const svm_node *py); static double anova(const svm_node *px, const svm_node *py, const double sigma, const int degree); double kernel_linear(int i, int j) const { return dot(x[i],x[j]); } double kernel_poly(int i, int j) const { return powi(gamma*dot(x[i],x[j])+coef0,degree); } double kernel_rbf(int i, int j) const { return exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); } double kernel_sigmoid(int i, int j) const { return tanh(gamma*dot(x[i],x[j])+coef0); } double kernel_laplace(int i, int j) const { return exp(-gamma*sqrt(fabs(x_square[i]+x_square[j]-2*dot(x[i],x[j])))); } double kernel_bessel(int i, int j) const { double bkt = gamma*sqrt(fabs(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); if (bkt < 0.000001){ return 1 ; } else { return(powi(((jn((int)degree, bkt)/powi(bkt,((int)degree)))/lim),(int) coef0)); } } double kernel_anova(int i, int j) const { return anova(x[i], x[j], gamma, degree); } double kernel_spline(int i, int j) const { double result=1.0; double min; double t1,t4; const svm_node *px = x[i], *py= x[j]; // px = x[i]; // py = x[j]; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { min=((px->valuevalue)?px->value:py->value); t1 = (px->value * py->value); t4 = min*min; result*=( 1.0 + t1 + (t1*min) ) - ( ((px->value+py->value)/2.0) * t4) + ((t4 * min)/3.0); } ++px; ++py; } return result; } double kernel_R(int i, int j) const { return *(K + m*i +j); } }; Kernel::Kernel(int l, svm_node * const * x_, const svm_parameter& param) :kernel_type(param.kernel_type), degree(param.degree), gamma(param.gamma), coef0(param.coef0), lim(param.lim), K(param.K), m(param.m) { switch(kernel_type) { case LINEAR: kernel_function = &Kernel::kernel_linear; break; case POLY: kernel_function = &Kernel::kernel_poly; break; case RBF: kernel_function = &Kernel::kernel_rbf; break; case SIGMOID: kernel_function = &Kernel::kernel_sigmoid; break; case LAPLACE: kernel_function = &Kernel::kernel_laplace; break; case BESSEL: kernel_function = &Kernel::kernel_bessel; break; case ANOVA: kernel_function = &Kernel::kernel_anova; break; case SPLINE: kernel_function = &Kernel::kernel_spline; break; case R: kernel_function = &Kernel::kernel_R; break; } clone(x,x_,l); if(kernel_type == RBF || kernel_type == LAPLACE || kernel_type == BESSEL) { x_square = new double[l]; for(int i=0;iindex != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } double Kernel::anova(const svm_node *px, const svm_node *py, const double sigma, const int degree) { double sum = 0; double tv; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { tv = (px->value - py->value) * (px->value - py->value); sum += exp( - sigma * tv); ++px; ++py; } else { if(px->index > py->index) { sum += exp( - sigma * (py->value * py->value)); ++py; } else { sum += exp( - sigma * (px->value * px->value)); ++px; } } } return (powi(sum,degree)); } double Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter& param) { switch(param.kernel_type) { case LINEAR: return dot(x,y); case POLY: return 
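/* polynomial kernel: (gamma*<x,y> + coef0)^degree */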
powi(param.gamma*dot(x,y)+param.coef0,param.degree); case RBF: { double sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { double d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return exp(-param.gamma*sum); } case SIGMOID: return tanh(param.gamma*dot(x,y)+param.coef0); default: return 0; /* Unreachable */ } } // Generalized SMO+SVMlight algorithm // Solves: // // min 0.5(\alpha^T Q \alpha) + p^T \alpha // // y^T \alpha = \delta // y_i = +1 or -1 // 0 <= alpha_i <= Cp for y_i = 1 // 0 <= alpha_i <= Cn for y_i = -1 // // Given: // // Q, p, y, Cp, Cn, and an initial feasible point \alpha // l is the size of vectors and matrices // eps is the stopping criterion // // solution will be put in \alpha, objective value will be put in obj // class Solver { public: Solver() {}; virtual ~Solver() {}; struct SolutionInfo { double obj; double rho; double upper_bound_p; double upper_bound_n; double r; // for Solver_NU }; void Solve(int l, const QMatrix& Q, const double *p_, const schar *y_, double *alpha_, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking); protected: int active_size; schar *y; double *G; // gradient of objective function enum { LOWER_BOUND, UPPER_BOUND, FREE }; char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE double *alpha; const QMatrix *Q; const double *QD; double eps; double Cp,Cn; double *p; int *active_set; double *G_bar; // gradient, if we treat free variables as 0 int l; bool unshrink; // XXX double get_C(int i) { return (y[i] > 0)? Cp : Cn; } void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } bool is_free(int i) { return alpha_status[i] == FREE; } void swap_index(int i, int j); void reconstruct_gradient(); virtual int select_working_set(int &i, int &j); virtual double calculate_rho(); virtual void do_shrinking(); private: bool be_shrunk(int i, double Gmax1, double Gmax2); }; void Solver::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(p[i],p[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } void Solver::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i,j; int nr_free = 0; for(j=active_size;j 2*active_size*(l-active_size)) { for(i=active_size;iget_Q(i,active_size); for(j=0;jget_Q(i,l); double alpha_i = alpha[i]; for(j=active_size;jl = l; this->Q = &Q; QD=Q.get_QD(); clone(p, p_,l); clone(y, y_,l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; unshrink = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;iINT_MAX/100 ? 
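/* cap at INT_MAX to avoid integer overflow in 100*l */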
INT_MAX : 100*l); int counter = min(l,1000)+1; while(iter < max_iter) { // show progress and do shrinking if(--counter == 0) { counter = min(l,1000); if(shrinking) do_shrinking(); } int i,j; if(select_working_set(i,j)!=0) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; if(select_working_set(i,j)!=0) break; else counter = 1; // do shrinking next iteration } ++iter; // update alpha[i] and alpha[j], handle bounds carefully const Qfloat *Q_i = Q.get_Q(i,active_size); const Qfloat *Q_j = Q.get_Q(j,active_size); double C_i = get_C(i); double C_j = get_C(j); double old_alpha_i = alpha[i]; double old_alpha_j = alpha[j]; if(y[i]!=y[j]) { double quad_coef = QD[i]+QD[j]+2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (-G[i]-G[j])/quad_coef; double diff = alpha[i] - alpha[j]; alpha[i] += delta; alpha[j] += delta; if(diff > 0) { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = diff; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = -diff; } } if(diff > C_i - C_j) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = C_i - diff; } } else { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = C_j + diff; } } } else { double quad_coef = QD[i]+QD[j]-2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (G[i]-G[j])/quad_coef; double sum = alpha[i] + alpha[j]; alpha[i] -= delta; alpha[j] += delta; if(sum > C_i) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = sum - C_i; } } else { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = sum; } } if(sum > C_j) { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = sum - C_j; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = sum; } } } // update G double delta_alpha_i = alpha[i] - old_alpha_i; double delta_alpha_j = alpha[j] - old_alpha_j; for(int k=0;k= max_iter) { if(active_size < l) { // reconstruct the whole gradient to calculate objective value reconstruct_gradient(); active_size = l; } } // calculate rho si->rho = calculate_rho(); // calculate objective value { double v = 0; int i; for(i=0;iobj = v/2; } // put back the solution { for(int i=0;iupper_bound_p = Cp; si->upper_bound_n = Cn; delete[] p; delete[] y; delete[] alpha; delete[] alpha_status; delete[] active_set; delete[] G; delete[] G_bar; } // return 1 if already optimal, return 0 otherwise int Solver::select_working_set(int &out_i, int &out_j) { // return i,j such that // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) double Gmax = -INF; double Gmax2 = -INF; int Gmax_idx = -1; int Gmin_idx = -1; double obj_diff_min = INF; for(int t=0;t= Gmax) { Gmax = -G[t]; Gmax_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmax) { Gmax = G[t]; Gmax_idx = t; } } int i = Gmax_idx; const Qfloat *Q_i = NULL; if(i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1 Q_i = Q->get_Q(i,active_size); for(int j=0;j= Gmax2) Gmax2 = G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { double grad_diff= Gmax-G[j]; if (-G[j] >= Gmax2) Gmax2 = -G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if 
(obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(Gmax+Gmax2 < eps) return 1; out_i = Gmax_idx; out_j = Gmin_idx; return 0; } bool Solver::be_shrunk(int i, double Gmax1, double Gmax2) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax2); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax1); } else return(false); } void Solver::do_shrinking() { int i; double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) } double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) } // find maximal violating pair first for(i=0;i= Gmax1) Gmax1 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax2) Gmax2 = G[i]; } } else { if(!is_upper_bound(i)) { if(-G[i] >= Gmax2) Gmax2 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax1) Gmax1 = G[i]; } } } if(unshrink == false && Gmax1 + Gmax2 <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i i) { if (!be_shrunk(active_size, Gmax1, Gmax2)) { swap_index(i,active_size); break; } active_size--; } } } double Solver::calculate_rho() { double r; int nr_free = 0; double ub = INF, lb = -INF, sum_free = 0; for(int i=0;i0) r = sum_free/nr_free; else r = (ub+lb)/2; return r; } // // Solver for nu-svm classification and regression // // additional constraint: e^T \alpha = constant // class Solver_NU: public Solver { public: Solver_NU() {} void Solve(int l, const QMatrix& Q, const double *p, const schar *y, double *alpha, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking) { this->si = si; Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking); } private: SolutionInfo *si; int select_working_set(int &i, int &j); double calculate_rho(); bool be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4); void do_shrinking(); }; // return 1 if already optimal, return 0 otherwise int Solver_NU::select_working_set(int &out_i, int &out_j) { // return i,j such that y_i = y_j and // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) double Gmaxp = -INF; double Gmaxp2 = -INF; int Gmaxp_idx = -1; double Gmaxn = -INF; double Gmaxn2 = -INF; int Gmaxn_idx = -1; int Gmin_idx = -1; double obj_diff_min = INF; for(int t=0;t= Gmaxp) { Gmaxp = -G[t]; Gmaxp_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmaxn) { Gmaxn = G[t]; Gmaxn_idx = t; } } int ip = Gmaxp_idx; int in = Gmaxn_idx; const Qfloat *Q_ip = NULL; const Qfloat *Q_in = NULL; if(ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1 Q_ip = Q->get_Q(ip,active_size); if(in != -1) Q_in = Q->get_Q(in,active_size); for(int j=0;j= Gmaxp2) Gmaxp2 = G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[ip]+QD[j]-2*Q_ip[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { double grad_diff=Gmaxn-G[j]; if (-G[j] >= Gmaxn2) Gmaxn2 = -G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[in]+QD[j]-2*Q_in[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps) return 1; if (y[Gmin_idx] == +1) out_i = Gmaxp_idx; else out_i = Gmaxn_idx; out_j = Gmin_idx; return 
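/* 0: a maximal violating pair (out_i, out_j) was found */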
0; } bool Solver_NU::be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax4); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax3); } else return(false); } void Solver_NU::do_shrinking() { double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) } double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) } double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) } double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) } // find maximal violating pair first int i; for(i=0;i Gmax1) Gmax1 = -G[i]; } else if(-G[i] > Gmax4) Gmax4 = -G[i]; } if(!is_lower_bound(i)) { if(y[i]==+1) { if(G[i] > Gmax2) Gmax2 = G[i]; } else if(G[i] > Gmax3) Gmax3 = G[i]; } } if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i i) { if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4)) { swap_index(i,active_size); break; } active_size--; } } } double Solver_NU::calculate_rho() { int nr_free1 = 0,nr_free2 = 0; double ub1 = INF, ub2 = INF; double lb1 = -INF, lb2 = -INF; double sum_free1 = 0, sum_free2 = 0; for(int i=0;i 0) r1 = sum_free1/nr_free1; else r1 = (ub1+lb1)/2; if(nr_free2 > 0) r2 = sum_free2/nr_free2; else r2 = (ub2+lb2)/2; si->r = (r1+r2)/2; return (r1-r2)/2; } /////////////////// BSVM code class Solver_SPOC { public: Solver_SPOC() {}; ~Solver_SPOC() {}; void Solve(int l, const Kernel& Q, double *alpha_, short *y_, double *C_, double eps, int shrinking, int nr_class); private: int active_size; double *G; // gradient of objective function short *y; bool *alpha_status; // free:true, bound:false double *alpha; const Kernel *Q; double eps; double *C; int *active_set; int l, nr_class; bool unshrinked; double get_C(int i, int m) { if (y[i] == m) return C[m]; return 0; } void update_alpha_status(int i, int m) { if(alpha[i*nr_class+m] >= get_C(i, m)) alpha_status[i*nr_class+m] = false; else alpha_status[i*nr_class+m] = true; } void swap_index(int i, int j); double select_working_set(int &q); void solve_sub_problem(double A, double *B, double C, double *nu); void reconstruct_gradient(); void do_shrinking(); }; void Solver_SPOC::swap_index(int i, int j) { Q->swap_index(i, j); swap(y[i], y[j]); swap(active_set[i], active_set[j]); for (int m=0;mget_Q(i,l); double alpha_i_m = alpha[i*nr_class+m]; for (int j=active_size;jl = l; this->nr_class = nr_class; this->Q = &Q; clone(y,y_,l); clone(alpha,alpha_,l*nr_class); C = C_; this->eps = eps; unshrinked = false; int i, m, q, old_q = -1; // initialize alpha_status { alpha_status = new bool[l*nr_class]; for(i=0;i 0) solve_sub_problem(A, B, C[y[q]], nu); else { i = 0; for (m=1;m B[i]) i = m; nu[i] = -C[y[q]]; } nu[y[q]] += C[y[q]]; for (m=0;m 1e-12) #endif { alpha[q*nr_class+m] = nu[m]; update_alpha_status(q, m); for (i=0;i 0) nSV++; } //info("\noptimization finished, #iter = %d, obj = %lf\n",iter, obj); // info("nSV = %d, nFREE = %d\n",nSV,nFREE); // put back the solution { for(int i=0;i vio_q) { q = i; vio_q = lb - ub; } } return vio_q; } void Solver_SPOC::do_shrinking() { int i, m; double Gm = select_working_set(i); if (Gm < eps) return; // shrink for (i=0;i= th) goto out; for (m++;m= th) goto out; --active_size; swap_index(i, active_size); --i; out: ; } // unshrink, check all variables again before final iterations if (unshrinked || Gm > 10*eps) return; unshrinked = 
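/* one-shot: once set, later calls skip the unshrink pass */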
true; reconstruct_gradient(); for (i=l-1;i>=active_size;i--) { double *G_i = &G[i*nr_class]; double th = G_i[y[i]] - Gm/2; for (m=0;m= th) goto out1; for (m++;m= th) goto out1; swap_index(i, active_size); ++active_size; ++i; out1: ; } } int compar(const void *a, const void *b) { if (*(double *)a > *(double *)b) return -1; else if (*(double *)a < *(double *)b) return 1; return 0; } void Solver_SPOC::solve_sub_problem(double A, double *B, double C, double *nu) { int r; double *D; clone(D, B, nr_class+1); qsort(D, nr_class, sizeof(double), compar); D[nr_class] = -INF; double phi = D[0] - A*C; for (r=0;phi<(r+1)*D[r+1];r++) phi += D[r+1]; delete[] D; phi /= (r+1); for (r=0;r 0)? Cp : Cn; } void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } bool is_free(int i) { return alpha_status[i] == FREE; } virtual void swap_index(int i, int j); virtual void reconstruct_gradient(); virtual void shrink_one(int k); virtual void unshrink_one(int k); double select_working_set(int &q); void do_shrinking(); private: double Cp, Cn; double *b; schar *y; }; void Solver_B::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } void Solver_B::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i; for(i=active_size;iget_Q(i,l); double alpha_i = alpha[i]; for(int j=active_size;jl = l; this->Q = &Q; b = b_; clone(y, y_, l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; this->qpsize = qpsize; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i1e-12) { alpha[working_set[i]] = qp.x[i]; Qfloat *QB_i = QB[i]; for(j=0;jobj = v/2; } // juggle everything back /*{ for(int i=0;iupper_bound = new double[2]; si->upper_bound[0] = Cp; si->upper_bound[1] = Cn; // info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;i= positive_max[j]) break; positive_max[j-1] = positive_max[j]; positive_set[j-1] = positive_set[j]; } positive_max[j-1] = v; positive_set[j-1] = i; } } for (i=0;i0) continue; } if (v > positive_max[0]) { for (j=1;j= -Gm) continue; } else continue; --active_size; shrink_one(k); --k; // look at the newcomer } // unshrink, check all variables again before final iterations if (unshrinked || Gm > eps*10) return; unshrinked = true; reconstruct_gradient(); for(k=l-1;k>=active_size;k--) { if (is_lower_bound(k)) { if (G[k] > Gm) continue; } else if (is_upper_bound(k)) { if (G[k] < -Gm) continue; } else continue; unshrink_one(k); active_size++; ++k; // look at the newcomer } } class Solver_B_linear : public Solver_B { public: Solver_B_linear() {}; ~Solver_B_linear() {}; int Solve(int l, svm_node * const * x_, double *b_, schar *y_, double *alpha_, double *w, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking, int qpsize); private: double get_C(int i) { return (y[i] > 0)? 
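/* per-class cost: Cp for positive, Cn for negative examples */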
Cp : Cn; } void swap_index(int i, int j); void reconstruct_gradient(); double dot(int i, int j); double Cp, Cn; double *b; schar *y; double *w; const svm_node **x; }; double Solver_B_linear::dot(int i, int j) { const svm_node *px = x[i], *py = x[j]; double sum = 0; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } void Solver_B_linear::swap_index(int i, int j) { swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(x[i], x[j]); } void Solver_B_linear::reconstruct_gradient() { int i; for(i=active_size;iindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[i] = y[i]*sum + b[i]; } } int Solver_B_linear::Solve(int l, svm_node * const * x_, double *b_, schar *y_, double *alpha_, double *w, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking, int qpsize) { this->l = l; clone(x, x_, l); clone(b, b_, l); clone(y, y_, l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; this->qpsize = qpsize; this->w = w; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;iindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[i] += y[i]*sum; } } // optimization step int iter = 0; int counter = min(l*2/qpsize,2000/qpsize)+1; while(1) { // show progress and do shrinking if(--counter == 0) { counter = min(l*2/qpsize, 2000/qpsize); if(shrinking) do_shrinking(); // info("."); } int i,j,q; if (select_working_set(q) < eps) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; // info("*");info_flush(); if (select_working_set(q) < eps) break; else counter = 1; // do shrinking next iteration } if (counter == min(l*2/qpsize, 2000/qpsize)) { bool same = true; for (i=0;i1e-12) { alpha[Bi] = qp.x[i]; update_alpha_status(Bi); double yalpha = y[Bi]*d; for (const svm_node *px = x[Bi];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } for(j=0;jindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[j] = y[j]*sum + b[j]; } } // calculate objective value { double v = 0; int i; for(i=0;iobj = v/2; } // juggle everything back /*{ for(int i=0;iupper_bound = new double[2]; si->upper_bound[0] = Cp; si->upper_bound[1] = Cn; // info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;iget_Q(real_i[i],real_l); double alpha_i = alpha[i], t; int y_i = y[i], yy_i = yy[i], ub, k; t = 2*alpha_i; ub = start2[yy_i*nr_class+y_i+1]; for (j=start2[yy_i*nr_class+y_i];jl = l; this->nr_class = nr_class; this->real_l = l/(nr_class - 1); this->Q = &Q; this->lin = lin; clone(y,y_,l); clone(alpha,alpha_,l); C = C_; this->eps = eps; this->qpsize = qpsize; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i 1e-12) { alpha[Bi] = qp.x[i]; Qfloat *QB_i = QB[i]; int y_Bi = y[Bi], yy_Bi = yy[Bi], ub, k; double t = 2*d; ub = start1[yy_Bi*nr_class+y_Bi+1]; for (j=start1[yy_Bi*nr_class+y_Bi];jobj = v/4; } clone(si->upper_bound,C,nr_class); //info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;i0;i--) swap_index(start2[i], start2[i-1]); t = s + 1; for (i=nr_class*nr_class;i>t;i--) swap_index(start1[i], start1[i-1]); t = nr_class*nr_class; for (i=s+1;i<=t;i++) start1[i]++; for (i=0;i<=s;i++) start2[i]++; } // // Q matrices for various formulations 
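// Each wraps Kernel with an LRU Cache and serves one column of the
// kernel matrix per get_Q() call (signed by the labels for the
// classification solvers); the BSVM variants (BSVC_Q, BONE_CLASS_Q,
// BSVR_Q) add +1 to every entry so the bias term is folded into the
// kernel.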
// class BSVC_Q: public Kernel { public: BSVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_) :Kernel(prob.l, prob.x, param) { clone(y,y_,prob.l); cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j) + 1); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(y[i],y[j]); } ~BSVC_Q() { delete[] y; delete cache; delete[] QD; } private: schar *y; Cache *cache; double *QD; }; class BONE_CLASS_Q: public Kernel { public: BONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j) + 1; } return data; } double *get_QD() const { return QD; } ~BONE_CLASS_Q() { delete cache; delete[] QD; } private: Cache *cache; double *QD; }; class BSVR_Q: public Kernel { public: BSVR_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { l = prob.l; cache = new Cache(l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;kget_data(real_i,&data,l) < l) { for(int j=0;j*kernel_function)(real_i,j) + 1; } // reorder and copy Qfloat *buf = buffer[next_buffer]; next_buffer = (next_buffer+1)%q; schar si = sign[i]; for(int j=0;j*kernel_function)(i,i); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j)); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(y[i],y[j]); swap(QD[i],QD[j]); } ~SVC_Q() { delete[] y; delete cache; delete[] QD; } private: schar *y; Cache *cache; double *QD; }; class ONE_CLASS_Q: public Kernel { public: ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[prob.l]; for(int i=0;i*kernel_function)(i,i); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(QD[i],QD[j]); } ~ONE_CLASS_Q() { delete cache; delete[] QD; } private: Cache *cache; double *QD; }; class SVR_Q: public Kernel { public: SVR_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { l = prob.l; cache = new Cache(l,(long int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[2*l]; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;k*kernel_function)(k,k); QD[k+l]=QD[k]; } buffer[0] = new Qfloat[2*l]; buffer[1] = new Qfloat[2*l]; next_buffer = 0; } void swap_index(int i, int j) const { swap(sign[i],sign[j]); swap(index[i],index[j]); swap(QD[i],QD[j]); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int real_i = index[i]; if(cache->get_data(real_i,&data,l) < l) { for(int 
j=0;j*kernel_function)(real_i,j); } // reorder and copy Qfloat *buf = buffer[next_buffer]; next_buffer = 1 - next_buffer; schar si = sign[i]; for(int j=0;jsvm_type; if(svm_type != C_BSVC && svm_type != EPSILON_BSVR && svm_type != KBB && svm_type != SPOC) return "unknown svm type"; // kernel_type int kernel_type = param->kernel_type; if(kernel_type != LINEAR && kernel_type != POLY && kernel_type != RBF && kernel_type != SIGMOID && kernel_type != R && kernel_type != LAPLACE&& kernel_type != BESSEL&& kernel_type != ANOVA) return "unknown kernel type"; // cache_size,eps,C,nu,p,shrinking if(kernel_type != LINEAR) if(param->cache_size <= 0) return "cache_size <= 0"; if(param->eps <= 0) return "eps <= 0"; if(param->C <= 0) return "C <= 0"; if(svm_type == EPSILON_BSVR) if(param->p < 0) return "p < 0"; if(param->shrinking != 0 && param->shrinking != 1) return "shrinking != 0 and shrinking != 1"; if(svm_type == C_BSVC || svm_type == KBB || svm_type == SPOC) if(param->qpsize < 2) return "qpsize < 2"; if(kernel_type == LINEAR) if (param->Cbegin <= 0) return "Cbegin <= 0"; if(kernel_type == LINEAR) if (param->Cstep <= 1) return "Cstep <= 1"; return NULL; } const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *param) { // svm_type int svm_type = param->svm_type; if(svm_type != C_SVC && svm_type != NU_SVC && svm_type != ONE_CLASS && svm_type != EPSILON_SVR && svm_type != NU_SVR) return "unknown svm type"; // kernel_type int kernel_type = param->kernel_type; if(kernel_type != LINEAR && kernel_type != POLY && kernel_type != RBF && kernel_type != SIGMOID && kernel_type != R && kernel_type != LAPLACE&& kernel_type != BESSEL&& kernel_type != ANOVA&& kernel_type != SPLINE) return "unknown kernel type"; // cache_size,eps,C,nu,p,shrinking if(param->cache_size <= 0) return "cache_size <= 0"; if(param->eps <= 0) return "eps <= 0"; if(svm_type == C_SVC || svm_type == EPSILON_SVR || svm_type == NU_SVR) if(param->C <= 0) return "C <= 0"; if(svm_type == NU_SVC || svm_type == ONE_CLASS || svm_type == NU_SVR) if(param->nu < 0 || param->nu > 1) return "nu < 0 or nu > 1"; if(svm_type == EPSILON_SVR) if(param->p < 0) return "p < 0"; if(param->shrinking != 0 && param->shrinking != 1) return "shrinking != 0 and shrinking != 1"; // check whether nu-svc is feasible if(svm_type == NU_SVC) { int l = prob->l; int max_nr_class = 16; int nr_class = 0; int *label = Malloc(int,max_nr_class); int *count = Malloc(int,max_nr_class); int i; for(i=0;iy[i]; int j; for(j=0;jnu*(n1+n2)/2 > min(n1,n2)) { free(label); free(count); return "specified nu is infeasible"; } } } } return NULL; } #include #include #include extern "C" { struct svm_node ** sparsify (double *x, int r, int c) { struct svm_node** sparse; int i, ii, count; sparse = (struct svm_node **) malloc (r * sizeof(struct svm_node *)); for (i = 0; i < r; i++) { /* determine nr. 
of non-zero elements */ for (count = ii = 0; ii < c; ii++) if (x[i * c + ii] != 0) count++; /* allocate memory for column elements */ sparse[i] = (struct svm_node *) malloc ((count + 1) * sizeof(struct svm_node)); /* set column elements */ for (count = ii = 0; ii < c; ii++) if (x[i * c + ii] != 0) { sparse[i][count].index = ii; sparse[i][count].value = x[i * c + ii]; count++; } /* set termination element */ sparse[i][count].index = -1; } return sparse; } struct svm_node ** transsparse (double *x, int r, int *rowindex, int *colindex) { struct svm_node** sparse; int i, ii, count = 0, nnz = 0; sparse = (struct svm_node **) malloc (r * sizeof(struct svm_node*)); for (i = 0; i < r; i++) { /* allocate memory for column elements */ nnz = rowindex[i+1] - rowindex[i]; sparse[i] = (struct svm_node *) malloc ((nnz + 1) * sizeof(struct svm_node)); /* set column elements */ for (ii = 0; ii < nnz; ii++) { sparse[i][ii].index = colindex[count]; sparse[i][ii].value = x[count]; count++; } /* set termination element */ sparse[i][ii].index = -1; } return sparse; } void tron_run(const svm_problem *prob, const svm_parameter* param, double *alpha, double *weighted_C, Solver_B::SolutionInfo* sii, int nr_class, int *count) { int l = prob->l; int i; double Cp = param->C; double Cn = param->C; if(param->nr_weight > 0) { Cp = param->C*param->weight[0]; Cn = param->C*param->weight[1]; } switch(param->svm_type) { case C_BSVC: { // double *alpha = new double[l]; double *minus_ones = new double[l]; schar *y = new schar[l]; for(i=0;iy[i] > 0) y[i] = +1; else y[i]=-1; } if (param->kernel_type == LINEAR) { double *w = new double[prob->n+1]; for (i=0;i<=prob->n;i++) w[i] = 0; Solver_B_linear s; int totaliter = 0; double Cpj = param->Cbegin, Cnj = param->Cbegin*Cn/Cp; while (Cpj < Cp) { totaliter += s.Solve(l, prob->x, minus_ones, y, alpha, w, Cpj, Cnj, param->eps, sii, param->shrinking, param->qpsize); if (Cpj*param->Cstep >= Cp) { for (i=0;i<=prob->n;i++) w[i] = 0; for (i=0;i= Cpj) alpha[i] = Cp; else if (y[i] == -1 && alpha[i] >= Cnj) alpha[i] = Cn; else alpha[i] *= Cp/Cpj; double yalpha = y[i]*alpha[i]; for (const svm_node *px = prob->x[i];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } else { for (i=0;iCstep; for (i=0;i<=prob->n;i++) w[i] *= param->Cstep; } Cpj *= param->Cstep; Cnj *= param->Cstep; } totaliter += s.Solve(l, prob->x, minus_ones, y, alpha, w, Cp, Cn, param->eps, sii, param->shrinking, param->qpsize); //info("\noptimization finished, #iter = %d\n",totaliter); delete[] w; } else { Solver_B s; s.Solve(l, BSVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, sii, param->shrinking, param->qpsize); } // double sum_alpha=0; // for(i=0;iC*prob->l)); // for(i=0;ip - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } if (param->kernel_type == LINEAR) { double *w = new double[prob->n+1]; for (i=0;i<=prob->n;i++) w[i] = 0; struct svm_node **x = new svm_node*[2*l]; for (i=0;ix[i]; Solver_B_linear s; int totaliter = 0; double Cj = param->Cbegin; while (Cj < param->C) { totaliter += s.Solve(2*l, x, linear_term, y, alpha, w, Cj, Cj, param->eps, sii, param->shrinking, param->qpsize); if (Cj*param->Cstep >= param->C) { for (i=0;i<=prob->n;i++) w[i] = 0; for (i=0;i<2*l;i++) { if (alpha[i] >= Cj) alpha[i] = param->C; else alpha[i] *= param->C/Cj; double yalpha = y[i]*alpha[i]; for (const svm_node *px = x[i];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } else { for (i=0;i<2*l;i++) alpha[i] *= param->Cstep; for 
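/* warm start: w is rescaled together with alpha as the cost grows by Cstep */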
(i=0;i<=prob->n;i++) w[i] *= param->Cstep; } Cj *= param->Cstep; } totaliter += s.Solve(2*l, x, linear_term, y, alpha2, w, param->C, param->C, param->eps, sii, param->shrinking, param->qpsize); //info("\noptimization finished, #iter = %d\n",totaliter); } else { Solver_B s; s.Solve(2*l, BSVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, sii, param->shrinking, param->qpsize); } double sum_alpha = 0; for(i=0;iC*l)); delete[] y; delete[] alpha2; delete[] linear_term; } break; case KBB: { Solver_B::SolutionInfo si; int i=0 , j=0 ,k=0 , ll = l*(nr_class - 1); double *alpha2 = Malloc(double, ll); short *y = new short[ll]; for (i=0;iy[q]; else q += count[j]; } Solver_MB s; s.Solve(ll, BONE_CLASS_Q(*prob,*param), -2, alpha2, y, weighted_C, 2*param->eps, &si, param->shrinking, param->qpsize, nr_class, count); //info("obj = %f, rho = %f\n",si.obj,0.0); int *start = Malloc(int,nr_class); start[0] = 0; for(i=1;iy[i]; } Solver_SPOC s; s.Solve(l, ONE_CLASS_Q(*prob, *param), alpha, y, weighted_C, param->eps, param->shrinking, nr_class); free(weighted_C); delete[] y; } break; } } SEXP tron_optim(SEXP x, SEXP r, SEXP c, SEXP y, SEXP K, SEXP colindex, SEXP rowindex, SEXP sparse, SEXP nclass, SEXP countc, SEXP kernel_type, SEXP svm_type, SEXP cost, SEXP eps, SEXP gamma, SEXP degree, SEXP coef0, SEXP Cbegin, SEXP Cstep, SEXP weightlabels, SEXP weights, SEXP nweights, SEXP weightedc, SEXP cache, SEXP epsilon, SEXP qpsize, SEXP shrinking ) { struct svm_parameter param; struct svm_problem prob; int i ,*count = NULL; double *alpha2 = NULL; SEXP alpha3 = NULL; int nr_class; const char* s; struct Solver_B::SolutionInfo si; param.svm_type = *INTEGER(svm_type); param.kernel_type = *INTEGER(kernel_type); param.degree = *INTEGER(degree); param.gamma = *REAL(gamma); param.coef0 = *REAL(coef0); param.cache_size = *REAL(cache); param.eps = *REAL(epsilon); param.C = *REAL(cost); param.Cbegin = *REAL(Cbegin); param.Cstep = *REAL(Cstep); param.K = REAL(K); param.qpsize = *INTEGER(qpsize); nr_class = *INTEGER(nclass); param.nr_weight = *INTEGER(nweights); if (param.nr_weight > 0) { param.weight = (double *) malloc (sizeof(double) * param.nr_weight); memcpy (param.weight, REAL(weights), param.nr_weight * sizeof(double)); param.weight_label = (int *) malloc (sizeof(int) * param.nr_weight); memcpy (param.weight_label, INTEGER(weightlabels), param.nr_weight * sizeof(int)); } param.p = *REAL(eps); param.shrinking = *INTEGER(shrinking); param.lim = 1/(gammafn(param.degree+1)*powi(2,param.degree)); /* set problem */ prob.l = *INTEGER(r); prob.n = *INTEGER(c); prob.y = (double *) malloc (sizeof(double) * prob.l); memcpy(prob.y, REAL(y), prob.l*sizeof(double)); if (*INTEGER(sparse) > 0) prob.x = transsparse(REAL(x), *INTEGER(r), INTEGER(rowindex), INTEGER(colindex)); else prob.x = sparsify(REAL(x), *INTEGER(r), *INTEGER(c)); s = svm_check_parameterb(&prob, ¶m); //if (s) //printf("%s",s); //else { double *weighted_C = Malloc(double, nr_class); memcpy(weighted_C, REAL(weightedc), nr_class*sizeof(double)); if(param.svm_type == 7) { alpha2 = (double *) malloc (sizeof(double) * prob.l*nr_class); } if(param.svm_type == 8) { count = Malloc(int, nr_class); memcpy(count, INTEGER(countc), nr_class*sizeof(int)); alpha2 = (double *) malloc (sizeof(double) * prob.l*(nr_class-1)); } if(param.svm_type == 5||param.svm_type==6) { alpha2 = (double *) malloc (sizeof(double) * prob.l); } tron_run(&prob, ¶m, alpha2, weighted_C , &si, nr_class, count); //} /* clean up memory */ if (param.nr_weight > 0) { free(param.weight); 
free(param.weight_label); } if(param.svm_type == 7) { PROTECT(alpha3 = allocVector(REALSXP, (nr_class*prob.l + 1))); UNPROTECT(1); for (i = 0; i < prob.l; i++) free (prob.x[i]); for (i = 0; i l; int i; switch(param->svm_type) { case C_SVC: { double Cp,Cn; double *minus_ones = new double[l]; schar *y = new schar[l]; for(i=0;iy[i] > 0) y[i] = +1; else y[i]=-1; } if(param->nr_weight > 0) { Cp = C*param->weight[0]; Cn = C*param->weight[1]; } else Cp = Cn = C; Solver s; //have to weight cost parameter for multiclass. problems s.Solve(l, SVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, si, param->shrinking); delete[] minus_ones; delete[] y; } break; case NU_SVC: { schar *y = new schar[l]; double nu = param->nu; double sum_pos = nu*l/2; double sum_neg = nu*l/2; for(i=0;iy[i]>0) { y[i] = +1; alpha[i] = min(1.0,sum_pos); sum_pos -= alpha[i]; } else { y[i] = -1; alpha[i] = min(1.0,sum_neg); sum_neg -= alpha[i]; } double *zeros = new double[l]; for(i=0;ieps, si, param->shrinking); double r = si->r; //info("C = %f\n",1/r); for(i=0;irho /= r; si->obj /= (r*r); si->upper_bound_p = 1/r; si->upper_bound_n = 1/r; delete[] y; delete[] zeros; } break; case ONE_CLASS: { double *zeros = new double[l]; schar *ones = new schar[l]; int n = (int)(param->nu*l); // # of alpha's at upper bound // set initial alpha probably usefull for smo for(i=0;inu * l - n; for(i=n+1;ieps, si, param->shrinking); delete[] zeros; delete[] ones; } break; case EPSILON_SVR: { double *alpha2 = new double[2*l]; double *linear_term = new double[2*l]; schar *y = new schar[2*l]; for(i=0;ip - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } Solver s; s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, si, param->shrinking); double sum_alpha = 0; for(i=0;iC*l)); delete[] alpha2; delete[] linear_term; delete[] y; } break; case NU_SVR: { double C = param->C; double *alpha2 = new double[2*l]; double *linear_term = new double[2*l]; schar *y = new schar[2*l]; double sum = C * param->nu * l / 2; for(i=0;iy[i]; y[i] = 1; linear_term[i+l] = prob->y[i]; y[i+l] = -1; } Solver_NU s; s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y, alpha2, C, C, param->eps, si, param->shrinking); //info("epsilon = %f\n",-si->r); for(i=0;i 0) { param.weight = (double *) malloc (sizeof(double) * param.nr_weight); memcpy (param.weight, REAL(weights), param.nr_weight * sizeof(double)); param.weight_label = (int *) malloc (sizeof(int) * param.nr_weight); memcpy (param.weight_label, INTEGER(weightlabels), param.nr_weight * sizeof(int)); } param.p = *REAL(eps); param.shrinking = *INTEGER(shrinking); param.lim = 1/(gammafn(param.degree+1)*powi(2,param.degree)); /* set problem */ prob.l = *INTEGER(r); prob.y = REAL(y); prob.n = *INTEGER(c); if (*INTEGER(sparse) > 0) prob.x = transsparse(REAL(x), *INTEGER(r), INTEGER(rowindex), INTEGER(colindex)); else prob.x = sparsify(REAL(x), *INTEGER(r), *INTEGER(c)); double *alpha2 = (double *) malloc (sizeof(double) * prob.l); s = svm_check_parameter(&prob, ¶m); //if (s) { //printf("%s",s); //} //else { solve_smo(&prob, ¶m, alpha2, &si, *REAL(cost), REAL(linear_term)); //} PROTECT(alpha = allocVector(REALSXP, prob.l+2)); /* clean up memory */ if (param.nr_weight > 0) { free(param.weight); free(param.weight_label); } for (i = 0; i < prob.l; i++) {free (prob.x[i]); REAL(alpha)[i] = *(alpha2+i); } free (prob.x); REAL(alpha)[prob.l] = si.rho; REAL(alpha)[prob.l+1] = si.obj; free(alpha2); UNPROTECT(1); return alpha; } } 
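/*
 * A minimal standalone sketch of the sparse row format produced by
 * sparsify()/transsparse() above: each row is an array of (index, value)
 * pairs terminated by index == -1.  The struct is redeclared locally and
 * the names here (sparse_row_demo, dot, demo) are illustrative only, not
 * part of kernlab; the dot product shows the two-pointer scan that this
 * row layout makes cheap.
 */
#include <cstdio>

namespace sparse_row_demo {

struct svm_node { int index; double value; };

/* Dot product of two -1-terminated sparse rows. */
double dot(const svm_node *px, const svm_node *py)
{
    double sum = 0;
    while (px->index != -1 && py->index != -1) {
        if (px->index == py->index) { sum += px->value * py->value; ++px; ++py; }
        else if (px->index < py->index) ++px;
        else ++py;
    }
    return sum;
}

void demo()
{
    /* dense rows (0 2 0 1) and (3 2 5 0), stored sparsely */
    svm_node x[] = { {1, 2.0}, {3, 1.0}, {-1, 0.0} };
    svm_node y[] = { {0, 3.0}, {1, 2.0}, {2, 5.0}, {-1, 0.0} };
    std::printf("dot = %g\n", dot(x, y));   /* prints dot = 4 */
}

} /* namespace sparse_row_demo */

/*
 * Note on the Cbegin/Cstep loops in tron_run() above: for linear kernels
 * the cost parameter is annealed -- the problem is solved at a small cost
 * Cj, the alphas and the weight vector w are scaled up by Cstep between
 * restarts, and the solver is warm-started at Cj*Cstep, repeating until
 * the target C is reached.
 */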
kernlab/src/brweight.cpp0000644000175100001440000000435112233654617015030 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/BoundedRangeWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef BRWEIGHT_CPP #define BRWEIGHT_CPP #include "brweight.h" #include #define MIN(x,y) (((x) < (y)) ? (x) : (y)) #define MAX(x,y) (((x) > (y)) ? (x) : (y)) /** * Bounded Range weight function. * W(y,t) := max(0,min(tau,n)-gamma) * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode BoundedRangeWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. Real tau = (Real)x_len; Real gamma = (Real)floor_len; weight = MAX(0,MIN(tau,n)-gamma); // std::cout << "floor_len:"< * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ConstantWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 12 Oct 2006 #ifndef CWEIGHT_H #define CWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' Constant weight class class ConstantWeight : public I_WeightFactory { public: /// Constructor ConstantWeight(){} /// Destructor virtual ~ConstantWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/ctable.h0000644000175100001440000000430312234152620014075 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). 
* Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ChildTable.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef CTABLE_H #define CTABLE_H #include #include #include "datatype.h" #include "errorcode.h" #include "lcp.h" // using namespace std; /** * ChildTable represents the parent-child relationship between * the lcp-intervals of suffix array. * Reference: AboKurOhl04 */ class ChildTable : public std::vector { private: // childtab needs lcptab to differentiate between up, down, and // nextlIndex values. LCP& _lcptab; public: // Constructors ChildTable(const UInt32 &size, LCP& lcptab): std::vector(size), _lcptab(lcptab){} // Destructor virtual ~ChildTable() {} // Get first l-index of an l-[i..j] interval ErrorCode l_idx(const UInt32 &i, const UInt32 &j, UInt32 &idx); // .up field ErrorCode up(const UInt32 &idx, UInt32 &val); // .down field ErrorCode down(const UInt32 &idx, UInt32 &val); // .next field can be retrieved by accessing the array directly. friend std::ostream& operator << (std::ostream& os, const ChildTable& ct); }; #endif kernlab/src/esa.cpp0000644000175100001440000007370512761213650013770 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ESA.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef ESA_CPP #define ESA_CPP #include #include #include #include #include #include #include #include #include "esa.h" #ifdef SSARRAY #ifdef __cplusplus extern "C" { #endif #include "sarray.h" #ifdef __cplusplus } #endif #else #include "wmsufsort.h" #endif #include "wkasailcp.h" #define MIN(x,y) (((x) < (y)) ? 
(x):(y)) ESA::ESA(const UInt32 & size_, SYMBOL *text_, int verb): _verb(verb), size(size_), text(text_), suftab(0), lcptab(size_), childtab(size_, lcptab), suflink(0), bcktab_depth(0), bcktab_size(0), bcktab_val(0), bcktab_key4(0), coef4(0), bcktab_key8(0), coef8(0) { I_SAFactory* sa_fac = 0; I_LCPFactory* lcp_fac = 0; //' input validation assert(size > 0); // if(text[size-1] != SENTINEL) // text = (SYMBOL*)(std::string(text)+SENTINEL).c_str(); assert(text[size-1] == SENTINEL); // CW Sanity test for (unsigned int i = 0; i < size-1 ; i++) { assert(text[i] != 0); } // for (int i = 0; i < size ; i++) { // printf("%c : %i\n", text[i], (int) text[i]); // } #if SSARRAY suftab = new int[size]; for (unsigned int i = 0; i < size - 1 ; i++) { suftab[i] = text[i]; } suftab[size-1] = 0; ssarray((int*) suftab); #else //' Construct Suffix Array if(!sa_fac){ sa_fac = new W_msufsort(); } // CW Try // size = 10; // text[size-1] = 0; suftab = new UInt32[size]; sa_fac->ConstructSA(text, size, suftab); if(sa_fac) { delete sa_fac; sa_fac = NULL; } #endif //' Compute LCP array if(!lcp_fac){ lcp_fac = new W_kasai_lcp(); } // CW lcp_fac->ComputeLCP(text, size, suftab, lcptab); lcp_fac->ComputeLCP(text, size, (UInt32 *) suftab, lcptab); if(lcp_fac) { delete lcp_fac; lcp_fac = NULL; } //' Compress LCP array lcptab.compact(); //' Construct Child Table ConstructChildTable(); #ifdef SLINK //' Construct Suffix link table //' The suffix link interval, (l-1)-[p..q] of interval l-[i..j] can be retrieved //' by following method: //' Let k be the firstlIndex of l-[i..j], p = suflink[2*k], q = suflink[2*k+1]. suflink = new UInt32[2 * size + 2]; //' extra space for extra sentinel char! memset(suflink,0,sizeof(UInt32)*(2 * size +2)); ConstructSuflink(); #else //' Threshold for constructing bucket table if(size >= 1024) ConstructBcktab(); //' Otherwise, just do plain binary search to search for suffix link interval #endif } ESA::~ESA() { //if(text) { delete text; text = 0;} if(suflink) { delete [] suflink; suflink=0; } if(suftab) { delete [] suftab; suftab=0; } if(bcktab_val) { delete [] bcktab_val; bcktab_val=0; } if(bcktab_key4) { delete [] bcktab_key4; bcktab_key4=0;} if(coef4) { delete [] coef4; coef4 = 0; } if(bcktab_key8) { delete [] bcktab_key8; bcktab_key8=0;} if(coef8) { delete [] coef8; coef8 = 0; } } /// The lcp-interval structure. Used in ESA::ConstructChildTable() class lcp_interval { public: UInt32 lcp; UInt32 lb; UInt32 rb; std::vector child; /// Constructors lcp_interval(){} lcp_interval(const UInt32 &lcp_, const UInt32 lb_, const UInt32 &rb_, lcp_interval *itv) { lcp = lcp_; lb = lb_; rb = rb_; if(itv) child.push_back(itv); } /// Destructor ~lcp_interval(){ for(UInt32 i=0; i< child.size(); i++) delete child[i]; child.clear(); } }; /** * Construct 3-fields-merged child table. */ ErrorCode ESA::ConstructChildTable(){ // Input validation assert(text); assert(suftab); //' stack for lcp-intervals std::stack lit; //' Refer to: Abo05::Algorithm 4.5.2. lcp_interval *lastInterval = 0; lcp_interval *new_itv = 0; lit.push(new lcp_interval(0, 0, 0, 0)); //' root interval // Variables to handle 0-idx bool first = true; UInt32 prev_0idx = 0; UInt32 first0idx = 0; // Loop thru and process each index. for(UInt32 idx = 1; idx < size + 1; idx++) { UInt32 tmp_lb = idx - 1; //svnvish: BUGBUG // We just assume that the lcp of size + 1 is zero. 
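// Worked illustration (hand-computed, not from the original source): for
// text = "abab$" with '$' as sentinel,
//
//   idx    : 0  1  2  3  4
//   suftab : 4  2  0  3  1      suffixes: $, ab$, abab$, b$, bab$
//   lcptab : 0  0  2  0  1
//
// The scan below discovers the lcp-intervals 2-[1..2] ("ab") and 1-[3..4]
// ("b") inside the root 0-[0..4]: interval 2-[1..2] is popped at idx = 3,
// where lcptab[3] = 0 drops below its lcp, and 1-[3..4] is popped at the
// final step idx = size, where the lcp is taken to be 0 as noted above.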
// This simplifies the logic of the code UInt32 lcp_idx = 0; if(idx < size){ lcp_idx = lcptab[idx]; } while (lcp_idx < lit.top()->lcp){ lastInterval = lit.top(); lit.pop(); lastInterval->rb = idx - 1; // svnvish: Begin process UInt32 n_child = lastInterval->child.size(); UInt32 i = lastInterval->lb; UInt32 j = lastInterval->rb; // idx -1 ? //Step 1: Set childtab[i].down or childtab[j+1].up to first l-index UInt32 first_l_index = i+1; if(n_child && (lastInterval->child[0]->lb == i)) first_l_index = lastInterval->child[0]->rb+1; //svnvish: BUGBUG // ec = childtab.Set_Up(lastInterval->rb+1, first_l_index); // ec = childtab.Set_Down(lastInterval->lb, first_l_index); childtab[lastInterval->rb] = first_l_index; childtab[lastInterval->lb] = first_l_index; // Now we need to set the NextlIndex fields The main problem here // is that the child intervals might not be contiguous UInt32 ptr = i+1; UInt32 child_count = 0; while(ptr < j){ UInt32 first = j; UInt32 last = j; // Get next child to process if(n_child - child_count){ first = lastInterval->child[child_count]->lb; last = lastInterval->child[child_count]->rb; child_count++; } // Eat away singleton intervals while(ptr < first){ childtab[ptr] = ptr + 1; ptr++; } // Handle an child interval and make appropriate entries in // child table ptr = last + 1; if(last < j){ childtab[first] = ptr; } } //' Free lcp_intervals for(UInt32 child_cnt = 0; child_cnt < n_child; child_cnt++) { delete lastInterval->child[child_cnt]; lastInterval->child[child_cnt] = 0; } // svnvish: End process tmp_lb = lastInterval->lb; if(lcp_idx <= lit.top()->lcp) { lit.top()->child.push_back(lastInterval); lastInterval = 0; } }// while if(lcp_idx > lit.top()->lcp) { new_itv = new lcp_interval(lcp_idx, tmp_lb,0, lastInterval); lit.push(new_itv); new_itv = 0; lastInterval = 0; } // Handle the 0-indices. // 0-indices := { i | LCP[i]=0, \forall i = 0,...,n-1} if((idx < size) && (lcp_idx == 0)) { // svnvish: BUGBUG // ec = childtab.Set_NextlIndex(prev_0_index,k); childtab[prev_0idx] = idx; prev_0idx = idx; // Handle first 0-index specially // Store in childtab[(size-1)+1].up if(first){ // svnvish: BUGBUG // ec = childtab.Set_Up(size,k); CHECKERROR(ec); first0idx = idx; first = false; } } } // for childtab[size-1] = first0idx; // svnvish: All remaining elements in the stack are ignored. // chteo: Free all remaining elements in the stack. while(!lit.empty()) { lastInterval = lit.top(); delete lastInterval; lit.pop(); } assert(lit.empty()); return NOERROR; } #ifdef SLINK /** * Get suffix link interval, [sl_i..sl_j], of a given interval, [i..j]. * * \param i - (IN) Left bound of interval [i..j] * \param j - (IN) Right bound of interval [i..j] * \param sl_i - (OUT) Left bound of suffix link interval [sl_i..sl_j] * \param sl_j - (OUT) Right bound of suffix link interval [sl_i..sl_j] */ ErrorCode ESA::GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j) { //' Input validation assert(i=0 && j= (j-i)); return NOERROR; } #elif defined(LSEARCH) /** * "Linear" Search version of GetSuflink. Suffix link intervals are not stored * explicitly but searched when needed. * * Note: Slow!!! especially in the case of long and similar texts. 
*/ ErrorCode ESA::GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j) { //' Variables SYMBOL ch; UInt32 lcp=0; UInt32 final_lcp = 0; UInt32 lb = 0, rb = size-1; //' root interval //' First suflink interval char := Second char of original interval ch = text[suftab[i]+1]; //' lcp of suffix link interval := lcp of original interval - 1 final_lcp = 0; GetLcp(i,j,final_lcp); final_lcp = (final_lcp > 0) ? final_lcp-1 : 0; //' Searching for suffix link interval sl_i = lb; sl_j = rb; while(lcp < final_lcp) { GetIntervalByChar(lb,rb,ch,lcp,sl_i, sl_j); GetLcp(sl_i, sl_j, lcp); lb = sl_i; rb = sl_j; ch = text[suftab[i]+lcp+1]; } assert(sl_j > sl_i); assert((sl_j-sl_i) >= (j-i)); return NOERROR; } #else /** * Construct bucket table. * * \param alpahabet_size - Size of alphabet set */ ErrorCode ESA::ConstructBcktab(const UInt32 &alphabet_size) { UInt32 MAX_DEPTH = 8; //' when alphabet_size is 256 UInt32 sizeof_uint4 = 4; //' 4 bytes integer UInt32 sizeof_uint8 = 8; //' 8 bytes integer UInt32 sizeof_key = sizeof_uint8; //' Step 1: Determine the bcktab_depth for(bcktab_depth = MAX_DEPTH; bcktab_depth >0; bcktab_depth--) { bcktab_size = 0; for(UInt32 i=0; i < size; i++) if(lcptab[i] < bcktab_depth) bcktab_size++; if(bcktab_depth <= 4) sizeof_key = sizeof_uint4; if(bcktab_size <= size/(sizeof_key + sizeof_uint4)) break; } //' Step 2: Allocate memory for bcktab_key and bcktab_val. //' Step 3: Precompute coefficients for computing hash values of prefixes later. //' Step 4: Collect the prefixes with lcp <= bcktab_depth and //' convert them into hash value. if(sizeof_key == sizeof_uint4) { //' (2) bcktab_key4 = new UInt32[bcktab_size]; bcktab_val = new UInt32[bcktab_size]; assert(bcktab_key4 && bcktab_val); //' (3) coef4 = new UInt32[4]; coef4[0] = 1; for(UInt32 i=1; i < 4; i++) coef4[i] = coef4[i-1]*alphabet_size; //' (4) for(UInt32 i=0, k=0; i < size; i++) { if(lcptab[i] < bcktab_depth) { UInt32 c = MIN((size-suftab[i]), bcktab_depth); hash_value4 = 0; for(UInt32 j=0; j < c; j++) hash_value4 += text[suftab[i]+j]*coef4[bcktab_depth-1-j]; bcktab_key4[k] = hash_value4; bcktab_val[k] = i; k++; } } } else { //' (2) bcktab_key8 = new UInt64[bcktab_size]; bcktab_val = new UInt32[bcktab_size]; assert(bcktab_key8 && bcktab_val); //' (3) coef8 = new UInt64[9]; coef8[0] = 1; for(UInt32 i=1; i < 9; i++) coef8[i] = coef8[i-1]*alphabet_size; //' (4) for(UInt32 i=0, k=0; i < size; i++) { if(lcptab[i] < bcktab_depth) { UInt32 c = MIN( (size-suftab[i]), bcktab_depth); hash_value8 = 0; for(UInt32 j=0; j < c; j++) hash_value8 += text[suftab[i]+j]*coef8[bcktab_depth-1-j]; bcktab_key8[k] = hash_value8; bcktab_val[k] = i; k++; } } } //' check if bcktab in ascending order // for(UInt32 ii=1; ii= 1); //' the interval [i..j] must has at least 2 suffixes. //' Variables UInt32 left=0, mid=0, right=0, tmp_right=0; UInt32 llcp=0, mlcp=0, rlcp=0; UInt32 orig_lcp = 0; UInt32 c = 0; UInt32 offset = 0; GetLcp(i, j, orig_lcp); if(orig_lcp <= 1) { sl_i = 0; sl_j = size-1; return NOERROR; } //' Default left = 0; right = size-1; //' Make use of bcktab here. Maximum lcp value is always 1 less than bcktab_depth. //' This is because including lcp values equal to bcktab_depth will violate //' the constraint of prefix uniqueness. 
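// Worked illustration of the bucket-table keys used below (hand-computed):
// with alphabet_size = 256 and bcktab_depth = 2, the prefix "ab" hashes to
//   'a'*coef[1] + 'b'*coef[0] = 97*256 + 98 = 24930,
// i.e. a key is the prefix read as a base-256 number, so lexicographic
// order of prefixes equals numeric order of keys.  Adding
// coef[bcktab_depth-offset] to such a key (done below for the right bound)
// jumps just past every key sharing those #offset# symbols, which is why
// lower_bound/upper_bound bracket exactly the suffixes with that prefix.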
offset = MIN(orig_lcp-1, bcktab_depth); assert(offset>=0); if(bcktab_key4) { hash_value4 = 0; for(UInt32 cnt=0; cnt < offset; cnt++) hash_value4 += coef4[bcktab_depth-1-cnt]*text[suftab[i]+1+cnt]; //' lower bound return the exact position of of target, if found one UInt32 *p = std::lower_bound(bcktab_key4, bcktab_key4+bcktab_size, hash_value4); left = bcktab_val[p - bcktab_key4]; //' this hash value is used to find the right bound of target interval hash_value4 += coef4[bcktab_depth-offset]; //' upper bound return the smallest value > than target. UInt32 *q = std::upper_bound(p, bcktab_key4+bcktab_size, hash_value4); if(q == bcktab_key4+bcktab_size) right = size-1; else right = bcktab_val[q - bcktab_key4] - 1; } else if(bcktab_key8) { hash_value8 = 0; for(UInt32 cnt=0; cnt < offset; cnt++) hash_value8 += coef8[bcktab_depth-1-cnt]*text[suftab[i]+1+cnt]; //' lower bound return the exact position of of target, if found one UInt64 *p = std::lower_bound(bcktab_key8, bcktab_key8+bcktab_size, hash_value8); left = bcktab_val[p - bcktab_key8]; //' this hash value is used to find the right bound of target interval hash_value8 += coef8[bcktab_depth-offset]; //' upper bound return the smallest value > than target. UInt64 *q = std::upper_bound(p, bcktab_key8+bcktab_size, hash_value8); if(q == bcktab_key8+bcktab_size) right = size-1; else right = bcktab_val[q - bcktab_key8] - 1; } tmp_right = right; assert(right <= size-1); assert(right > left); offset = 0; //' Compute LEFT boundary of suflink interval Compare(left, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, llcp); llcp += offset; if(llcp < orig_lcp-1) { Compare(right, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, rlcp); rlcp += offset; c = MIN(llcp,rlcp); while(right-left > 1){ mid = (left + right)/2; Compare(mid, c, &text[suftab[i]+1+c], orig_lcp-1-c, mlcp); mlcp += c; //' if target not found yet... if(mlcp < orig_lcp-1) { if(text[suftab[mid]+mlcp] < text[suftab[i]+mlcp+1]) { left = mid; llcp = mlcp; } else { right = mid; rlcp = mlcp; } } else { //' mlcp == orig_lcp-1 assert(mlcp == orig_lcp-1); //' target found, but want to make sure it is the LEFTmost... right = mid; rlcp = mlcp; } c = MIN(llcp, rlcp); } sl_i = right; llcp = rlcp; } else { sl_i = left; } //' Compute RIGHT boundary of suflink interval right = tmp_right; left = sl_i; Compare(right, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, rlcp); rlcp += offset; if(rlcp < orig_lcp-1) { c = MIN(llcp,rlcp); while(right-left > 1){ mid = (left + right)/2; Compare(mid, c, &text[suftab[i]+1+c], orig_lcp-1-c, mlcp); mlcp += c; //' if target not found yet... if(mlcp < orig_lcp-1) { if(text[suftab[mid]+mlcp] < text[suftab[i]+mlcp+1]) { //' target is on the right half left = mid; llcp = mlcp; } else { //' target is on the left half right = mid; rlcp = mlcp; } } else { //' mlcp == orig_lcp-1 assert(mlcp == orig_lcp-1); //' target found, but want to make sure it is the RIGHTmost... left = mid; llcp = mlcp; } c = MIN(llcp, rlcp); } sl_j = left; } else { sl_j = right; } assert(sl_i < sl_j); return NOERROR; } #endif /** * Find suffix link interval, [p..q], for a child interval, [c_i..c_j], given its * parent interval [p_i..p_j]. * * Pre : 1. Suffix link interval for parent interval has been computed. * 2. [child_i..child_j] is not a singleton interval. * * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param child_i - (IN) Left bound of child interval. * \param child_j - (IN) Right bound of child interval. 
* \param sl_i - (OUT) Left bound of suffix link interval of child interval * \param sl_j - (OUT) Right bound of suffix link interval of child interval */ ErrorCode ESA::FindSuflink(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &child_i, const UInt32 &child_j, UInt32 &sl_i, UInt32 &sl_j) { assert(child_i != child_j); //' Variables SYMBOL ch; UInt32 tmp_i = 0; UInt32 tmp_j = 0; UInt32 lcp_child = 0; UInt32 lcp_parent = 0; UInt32 lcp_sl = 0; //' Step 1: Get suffix link interval of parent interval and its lcp value. //' 2: Get lcp values of parent and child intervals. //' Shortcut! if(parent_i ==0 && parent_j == size-1) { //' this is root interval //' (1) sl_i = 0; sl_j = size-1; lcp_sl = 0; //' (2) lcp_parent = 0; GetLcp(child_i,child_j,lcp_child); assert(lcp_child > 0); } else { //' (1) GetSuflink(parent_i,parent_j,sl_i,sl_j); GetLcp(sl_i, sl_j, lcp_sl); //' (2) GetLcp(parent_i,parent_j,lcp_parent); GetLcp(child_i,child_j,lcp_child); assert(lcp_child > 0); } //' Traversing down the subtree of [sl_i..sl_j] and looking for //' the suffix link interval of child interval. while (lcp_sl < lcp_child-1) { //' The character that we want to look for in suflink interval. ch = text[suftab[child_i]+lcp_sl+1]; tmp_i = sl_i; tmp_j = sl_j; GetIntervalByChar(tmp_i, tmp_j, ch, lcp_sl, sl_i, sl_j); assert(sl_i > q; //' The interval queue std::pair interval; //' Step 0: Push root onto queue. And define itself as its suflink. q.push(std::make_pair((unsigned int)0,size-1)); UInt32 idx = 0; childtab.l_idx(0,size-1,idx); suflink[idx+idx] = 0; //' left bound of suffix link interval suflink[idx+idx+1] = size-1; //' right bound of suffix link interval //' Step 1: Breadth first traversal. while (!q.empty()) { //' Step 1.1: Pop interval from queue. interval = q.front(); q.pop(); //' Step 1.2: For each non-singleton child-intervals, [p..q], "find" its //' suffix link interval and then "push" it onto the interval queue. UInt32 i=0,j=0, sl_i=0, sl_j=0, start_idx=interval.first; do { //' Notes: interval.first := left bound of suffix link interval //' interval.second := right bound of suffix link interval assert(interval.first>=0 && interval.second < size); GetIntervalByIndex(interval.first, interval.second, start_idx, i, j); if(j > i) { //' [i..j] is non-singleton interval FindSuflink(interval.first, interval.second, i,j, sl_i, sl_j); assert(!(sl_i == i && sl_j == j)); //' Store suflink of [i..j] UInt32 idx=0; childtab.l_idx(i, j, idx); suflink[idx+idx] = sl_i; suflink[idx+idx+1] = sl_j; //' Push suflink interval onto queue q.push(std::make_pair(i,j)); } start_idx = j+1; //' prepare to get next child interval }while(start_idx < interval.second); } return NOERROR; } /** * Get all child-intervals, including singletons. * Store all non-singleton intervals onto #q#, where interval is defined as * (i,j) where i and j are left and right bounds. * * \param lb - (IN) Left bound of current interval. * \param rb - (IN) Right bound of current interval. * \param q - (OUT) Storage for intervals. 
*/ ErrorCode ESA::GetChildIntervals(const UInt32 &lb, const UInt32 &rb, std::vector > &q) { //' Variables UInt32 k=0; //' general index UInt32 i=0,j=0; //' for interval [i..j] //' Input validation assert(rb-lb >= 1); k = lb; do { assert(lb>=0 && rb 0) { if(j > i) { // chteo: saved 1 operation ;) [260906] //' Non-singleton interval q.push_back(std::make_pair(i,j)); } k = j+1; }while(k < rb); return NOERROR; } /** * Given an l-interval, l-[i..j] and a starting index, idx \in [i..j], * return the child-interval, k-[p..q], of l-[i..j] where p == idx. * * Reference: Abo05::algorithm 4.6.4 * * Pre: #start_idx# is a l-index or equal to parent_i. * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param start_idx - (IN) Predefined left bound of child interval. * \param child_i - (OUT) Left bound of child interval. * \param child_j - (OUT) Right bound of child interval. * * Time complexity: O(|alphabet set|) */ ErrorCode ESA::GetIntervalByIndex(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &start_idx, UInt32 &child_i, UInt32 &child_j) { //' Variables UInt32 lcp_child_i = 0; UInt32 lcp_child_j = 0; //' Input validation assert( (parent_i < parent_j) && (parent_i >= 0) && (parent_j < size) && (start_idx >= parent_i) && (start_idx < parent_j)); child_i = start_idx; //' #start_idx# is not and l-index, i.e. #start_idx == #parent_i# if(child_i == parent_i) { childtab.l_idx(parent_i,parent_j,child_j); child_j--; return NOERROR; } //' #start_idx# is a l-index // svnvish:BUGBUG child_j = childtab[child_i]; lcp_child_i = lcptab[child_i]; lcp_child_j = lcptab[child_j]; if(child_i < child_j && lcp_child_i == lcp_child_j) child_j--; else { //' child_i is the left bound of last child interval child_j = parent_j; } return NOERROR; } /** * Given an l-interval, l-[i..j] and a starting character, ch \in alphabet set, * return the child-interval, k-[p..q], of l-[i..j] such that text[sa[p]+depth] == ch. * * Reference: Abo05::algorithm 4.6.4 * * Post: Return [i..j]. If interval was found, i<=j, otherwise, i>j. * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param ch - (IN) Starting character of left bound (suffix) of child interval. * \param depth - (IN) The position where #ch# is located in #text# * i.e. ch = text[suftab[parent_i]+depth]. * \param child_i - (OUT) Left bound of child interval. * \param child_j - (OUT) Right bound of child interval. * * Time complexity: O(|alphabet set|) */ ErrorCode ESA::GetIntervalByChar(const UInt32 &parent_i, const UInt32 &parent_j, const SYMBOL &ch, const UInt32 &depth, UInt32 &child_i, UInt32 &child_j) { //' Input validation assert(parent_i < parent_j && parent_i >= 0 && parent_j < size); //' Variables UInt32 idx = 0; UInt32 idx_next = 0; UInt32 lcp_idx = 0; UInt32 lcp_idx_next = 0; UInt32 lcp = 0; //' #depth# is actually equal to the following statement! //ec = GetLcp(parent_i, parent_j, lcp); CHECKERROR(ec); lcp = depth; //' Step 1: Check if #ch# falls in the initial range. if(text[suftab[parent_i]+lcp] > ch || text[suftab[parent_j]+lcp] < ch) { //' No child interval starts with #ch#, so, return undefined interval. child_i = 1; child_j = 0; return NOERROR; } //' Step 2: #ch# is in the initial range, but not necessarily exists in the range. 
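// Illustration (hand-computed, using the "abab$" example given earlier):
// the root interval 0-[0..4] has l-indices 1 and 3 (positions where
// lcptab equals the interval lcp 0), so its child intervals are [0..0]
// ("$"), [1..2] ("ab") and [3..4] ("b").  The code below first checks the
// block before the first l-index, then walks the NextlIndex chain,
// comparing text[suftab[idx]+lcp] with #ch# at each stop.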
//' Step 2.1: Get first l-index childtab.l_idx(parent_i, parent_j, idx); assert(idx > parent_i && idx <= parent_j); if(text[suftab[idx-1]+lcp] == ch) { child_i = parent_i; child_j = idx-1; return NOERROR; } //' Step 3: Look for child interval which starts with #ch# // svnvish: BUGBUG //ec = childtab.NextlIndex(idx, idx_next); CHECKERROR(ec); idx_next = childtab[idx]; lcp_idx = lcptab[idx]; lcp_idx_next = lcptab[idx_next]; while(idx < idx_next && lcp_idx == lcp_idx_next && text[suftab[idx]+lcp] < ch) { idx = idx_next; // svnvish: BUGBUG // ec = childtab.NextlIndex(idx, idx_next); CHECKERROR(ec); idx_next = childtab[idx]; lcp_idx = lcptab[idx]; lcp_idx_next = lcptab[idx_next]; } if(text[suftab[idx]+lcp] == ch) { child_i = idx; if(idx < idx_next && lcp_idx == lcp_idx_next) child_j = idx_next - 1; else child_j = parent_j; return NOERROR; } //' Child interval starts with #ch# not found child_i = 1; child_j = 0; return NOERROR; } /** * Return the lcp value of a given interval, l-[i..j]. * * Pre: [i..j] \subseteq [0..size] * * \param i - (IN) Left bound of the interval. * \param j - (IN) Right bound of the interval. * \param val - (OUT) The lcp value of the interval. */ ErrorCode ESA::GetLcp(const UInt32 &i, const UInt32 &j, UInt32 &val) { //' Input validation assert(i < j && i >= 0 && j < size); //' Variables UInt32 up, down; //' 0-[0..size-1]. This is a shortcut! if(i == 0 && j == size) { val = 0; } else { childtab.up(j+1,up); if( (i < up) && (up <= j)) { val = lcptab[up]; } else { childtab.down(i,down); val = lcptab[down]; } } return NOERROR; } /** * Compare #pattern# string to text[suftab[#idx#]..size] and return the * length of the substring matched. * * \param idx - (IN) The index of esa. * \param depth - (IN) The start position of matching mechanism. * \param pattern - (IN) The pattern string. * \param p_len - (IN) The length of #pattern#. * \param matched_len - (OUT) The length of matched substring. */ ErrorCode ESA::Compare(const UInt32 &idx, const UInt32 &depth, SYMBOL *pattern, const UInt32 &p_len, UInt32 &matched_len) { //' Variables UInt32 min=0; min = (p_len < size-(suftab[idx]+depth)) ? p_len : size-(suftab[idx]+depth); matched_len = 0; for(UInt32 k=0; k < min; k++) { if(text[suftab[idx]+depth+k] == pattern[k]) matched_len++; else break; } return NOERROR; } /** * Find the longest matching of text and pattern. * * Note: undefinded interval := [i..j] where i>j * * Post: Return "floor" and "ceil" of longest substring of pattern that exists in text. * Otherwise, that is, no substring of pattern ever exists in text, * return the starting interval, [i..j]. * * \param i - (IN) Left bound of the starting interval. * \param j - (IN) Right bound of the starting interval. * \param offset - (IN) The number of characters between the head of suffix and the * position to start matching. * \param pattern - (IN) The pattern string to match to esa. * \param p_len - (IN) The length of #pattern# * \param lb - (OUT) The left bound of the interval containing * longest matched suffix. * \param rb - (OUT) The right bound of the interval containing * longest matched suffix. * \param matched_len - (OUT) The length of the longest matched suffix. * \param floor_lb - (OUT) Left bound of floor interval of [lb..rb]. * \param floor_rb - (OUT) Right bound of floor interval of [lb..rb]. * \param floor_len - (OUT) The lcp value of floor interval. 
*/ ErrorCode ESA::ExactSuffixMatch(const UInt32 &i, const UInt32 &j, const UInt32 &offset, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len, UInt32 &floor_lb, UInt32 &floor_rb, UInt32 &floor_len) { //' Input validation assert(i != j); //' Variables UInt32 min, lcp; bool queryFound = true; //' Initial setting. floor_lb = lb = i; floor_rb = rb = j; matched_len = offset; //' Step 1: Get lcp of floor/starting interval. GetLcp(floor_lb, floor_rb, lcp); floor_len = lcp; //' Step 2: Skipping #offset# characters while(lcp < matched_len) { floor_lb = lb; floor_rb = rb; floor_len = lcp; GetIntervalByChar(floor_lb, floor_rb, pattern[lcp], lcp, lb, rb); // printf("lb, rb : %i, %i\n", lb, rb); assert(lb <= rb); if(lb == rb) break; GetLcp(lb, rb, lcp); } //' Step 3: Continue matching from the point (either an interval or singleton) we stopped. while( (lb<=rb) && queryFound ) { if(lb != rb) { GetLcp(lb, rb, lcp); min = (lcp < p_len) ? lcp : p_len; while(matched_len < min) { queryFound = (text[suftab[lb]+matched_len] == pattern[matched_len]); if(queryFound) matched_len++; else return NOERROR; } assert(matched_len == min); //' Full pattern found! if(matched_len == p_len) return NOERROR; floor_lb = lb; floor_rb = rb; floor_len = lcp; GetIntervalByChar(floor_lb, floor_rb,pattern[matched_len],matched_len,lb,rb); }else { //' lb == rb, i.e. singleton interval. min = (p_len < size-suftab[lb]) ? p_len : size-suftab[lb]; while(matched_len < min && queryFound) { queryFound = (text[suftab[lb]+matched_len] == pattern[matched_len]); if(queryFound) matched_len++; } break; } } if(lb > rb) { lb = floor_lb; rb = floor_rb; } return NOERROR; } #endif kernlab/src/expdecayweight.h0000644000175100001440000000342012234152620015660 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ExpDecayWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef EXPDECAYWEIGHT_H #define EXPDECAYWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include class ExpDecayWeight : public I_WeightFactory { public: Real lambda; /// Constructors //' NOTE: lambda shouldn't be equal to 1, otherwise there will be //' divide-by-zero error. 
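//' The (lambda - 1) denominator implied by the NOTE above is consistent
//' with a geometric-series weight of the form
//'   W(gamma,tau) = (lambda^-gamma - lambda^-tau) / (lambda - 1),
//' i.e. the sum of lambda^-k over matched lengths k in (gamma..tau]; this
//' is an inference from the header alone -- the authoritative expression
//' is the ComputeWeight() body in expdecayweight.cpp.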
ExpDecayWeight(const Real &lambda_=2.0):lambda(lambda_) {} /// Destructor virtual ~ExpDecayWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/Makevars0000644000175100001440000000006011470002321014153 0ustar hornikusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) kernlab/src/dprecond.c0000644000175100001440000000200011654244033014431 0ustar hornikusers#include #include #include #include /* LAPACK */ /* extern int dpotf2_(char *, int *, double *, int *, int *); */ double dcholfact(int n, double *A, double *L) { /* if A is p.d. , A = L*L' if A is p.s.d. , A + lambda*I = L*L'; */ int indef, i; static double lambda = 1e-3/512/512; memcpy(L, A, sizeof(double)*n*n); F77_CALL(dpotf2)("L", &n, L, &n, &indef); if (indef != 0) { memcpy(L, A, sizeof(double)*n*n); for (i=0;i #include #include /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *); */ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern void dtron(int, double *, double *, double *, double, double, double, double, int, double); struct BQP { double eps; int n; double *x, *C, *Q, *p; }; int nfev, inc = 1; double one = 1, zero = 0, *A, *g0; int uhes(int n, double *x, double **H) { *H = A; return 0; } int ugrad(int n, double *x, double *g) { /* evaluate the gradient g = A*x + g0 */ memcpy(g, g0, sizeof(double)*n); F77_CALL(dsymv)("U", &n, &one, A, &n, x, &inc, &one, g, &inc); return 0; } int ufv(int n, double *x, double *f) { /* evaluate the function value f(x) = 0.5*x'*A*x + g0'*x */ double *t = (double *) malloc(sizeof(double)*n); F77_CALL(dsymv)("U", &n, &one, A, &n, x, &inc, &zero, t, &inc); *f = F77_CALL(ddot)(&n, x, &inc, g0, &inc) + 0.5 * F77_CALL(ddot)(&n, x, &inc, t, &inc); free(t); return ++nfev; } void solvebqp(struct BQP *qp) { /* driver for positive semidefinite quadratic programing version of tron */ int i, n, maxfev; double *x, *xl, *xu; double frtol, fatol, fmin, gtol, cgtol; n = qp->n; maxfev = 1000; /* ? */ nfev = 0; x = qp->x; xu = qp->C; A = qp->Q; g0 = qp->p; xl = (double *) malloc(sizeof(double)*n); for (i=0;ieps; dtron(n, x, xl, xu, gtol, frtol, fatol, fmin, maxfev, cgtol); free(xl); } kernlab/src/wmsufsort.h0000644000175100001440000000347512234152620014725 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_msufsort.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 13 Jul 2007 : use MSufSort v3.1 instead of v2.2 // Wrapper for Michael Maniscalco's MSufSort version 3.1 algorithm #ifndef W_MSUFSORT_H #define W_MSUFSORT_H #include "datatype.h" #include "isafactory.h" #include "msufsort.h" class W_msufsort : public I_SAFactory { public: ///Variables //'Declaration of object POINTERS, no initialization needed. //'If Declaration of objects, initialize them in member initialization list. MSufSort *msuffixsorter; ///Constructor W_msufsort(); ///Destructor virtual ~W_msufsort(); ///Methods ErrorCode ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array); }; #endif kernlab/src/dtron.c0000644000175100001440000001705713561526627014016 0ustar hornikusers#include #include #include #include #include extern void *xmalloc(size_t); extern double mymin(double, double); extern double mymax(double, double); extern int ufv(int, double *, double *); extern int ugrad(int, double *, double *); extern int uhes(int, double *, double **); /* LEVEL 1 BLAS */ /*extern double dnrm2_(int *, double *, int *);*/ /*extern double ddot_(int *, double *, int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern double dgpnrm(int, double *, double *, double *, double *); extern void dcauchy(int, double *, double *, double *, double *, double *, double, double *, double *, double *); extern void dspcg(int, double *, double *, double *, double *, double *, double, double, double *, int *); void dtron(int n, double *x, double *xl, double *xu, double gtol, double frtol, double fatol, double fmin, int maxfev, double cgtol) { /* c ********* c c Subroutine dtron c c The optimization problem of BSVM is a bound-constrained quadratic c optimization problem and its Hessian matrix is positive semidefinite. c We modified the optimization solver TRON by Chih-Jen Lin and c Jorge More' into this version which is suitable for this c special case. c c This subroutine implements a trust region Newton method for the c solution of large bound-constrained quadratic optimization problems c c min { f(x)=0.5*x'*A*x + g0'*x : xl <= x <= xu } c c where the Hessian matrix A is dense and positive semidefinite. The c user must define functions which evaluate the function, gradient, c and the Hessian matrix. c c The user must choose an initial approximation x to the minimizer, c lower bounds, upper bounds, quadratic terms, linear terms, and c constants about termination criterion. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is the final minimizer. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c gtol is a double precision variable. c On entry gtol specifies the relative error of the projected c gradient. c On exit gtol is unchanged. c c frtol is a double precision variable. 
c On entry frtol specifies the relative error desired in the c function. Convergence occurs if the estimate of the c relative error between f(x) and f(xsol), where xsol c is a local minimizer, is less than frtol. c On exit frtol is unchanged. c c fatol is a double precision variable. c On entry fatol specifies the absolute error desired in the c function. Convergence occurs if the estimate of the c absolute error between f(x) and f(xsol), where xsol c is a local minimizer, is less than fatol. c On exit fatol is unchanged. c c fmin is a double precision variable. c On entry fmin specifies a lower bound for the function. c The subroutine exits with a warning if f < fmin. c On exit fmin is unchanged. c c maxfev is an integer variable. c On entry maxfev specifies the limit of function evaluations. c On exit maxfev is unchanged. c c cgtol is a double precision variable. c On entry gqttol specifies the convergence criteria for c subproblems. c On exit gqttol is unchanged. c c ********** */ /* Parameters for updating the iterates. */ double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75; /* Parameters for updating the trust region size delta. */ double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4; double p5 = 0.5, one = 1; double gnorm, gnorm0, delta, snorm; double alphac = 1, alpha, f, fc, prered, actred, gs; int search = 1, iter = 1, info, inc = 1; double *xc = (double *) xmalloc(sizeof(double)*n); double *s = (double *) xmalloc(sizeof(double)*n); double *wa = (double *) xmalloc(sizeof(double)*n); double *g = (double *) xmalloc(sizeof(double)*n); double *A = NULL; uhes(n, x, &A); ugrad(n, x, g); ufv(n, x, &f); gnorm0 = F77_CALL(dnrm2)(&n, g, &inc); delta = 1000*gnorm0; gnorm = dgpnrm(n, x, xl, xu, g); if (gnorm <= gtol*gnorm0) { /* //printf("CONVERGENCE: GTOL TEST SATISFIED\n"); */ search = 0; } while (search) { /* Save the best function value and the best x. */ fc = f; memcpy(xc, x, sizeof(double)*n); /* Compute the Cauchy step and store in s. */ dcauchy(n, x, xl, xu, A, g, delta, &alphac, s, wa); /* Compute the projected Newton step. */ dspcg(n, x, xl, xu, A, g, delta, cgtol, s, &info); if (ufv(n, x, &f) > maxfev) { /* //printf("ERROR: NFEV > MAXFEV\n"); */ search = 0; continue; } /* Compute the predicted reduction. */ memcpy(wa, g, sizeof(double)*n); F77_CALL(dsymv)("U", &n, &p5, A, &n, s, &inc, &one, wa, &inc); prered = -F77_CALL(ddot)(&n, s, &inc, wa, &inc); /* Compute the actual reduction. */ actred = fc - f; /* On the first iteration, adjust the initial step bound. */ snorm = F77_CALL(dnrm2)(&n, s, &inc); if (iter == 1) delta = mymin(delta, snorm); /* Compute prediction alpha*snorm of the step. */ gs = F77_CALL(ddot)(&n, g, &inc, s, &inc); if (f - fc - gs <= 0) alpha = sigma3; else alpha = mymax(sigma1, -0.5*(gs/(f - fc - gs))); /* Update the trust region bound according to the ratio of actual to predicted reduction. */ if (actred < eta0*prered) /* Reduce delta. Step is not successful. */ delta = mymin(mymax(alpha, sigma1)*snorm, sigma2*delta); else { if (actred < eta1*prered) /* Reduce delta. Step is not sufficiently successful. */ delta = mymax(sigma1*delta, mymin(alpha*snorm, sigma2*delta)); else if (actred < eta2*prered) /* The ratio of actual to predicted reduction is in the interval (eta1,eta2). We are allowed to either increase or decrease delta. */ delta = mymax(sigma1*delta, mymin(alpha*snorm, sigma3*delta)); else /* The ratio of actual to predicted reduction exceeds eta2. Do not decrease delta. */ delta = mymax(delta, mymin(alpha*snorm, sigma3*delta)); } /* Update the iterate. 
*/ if (actred > eta0*prered) { /* Successful iterate. */ iter++; /* uhes(n, x, &A); */ ugrad(n, x, g); gnorm = dgpnrm(n, x, xl, xu, g); if (gnorm <= gtol*gnorm0) { /* //printf("CONVERGENCE: GTOL = %g TEST SATISFIED\n", gnorm/gnorm0); */ search = 0; continue; } } else { /* Unsuccessful iterate. */ memcpy(x, xc, sizeof(double)*n); f = fc; } /* Test for convergence */ if (f < fmin) { //printf("WARNING: F .LT. FMIN\n"); search = 0; /* warning */ continue; } if (fabs(actred) <= fatol && prered <= fatol) { //printf("CONVERGENCE: FATOL TEST SATISFIED\n"); search = 0; continue; } if (fabs(actred) <= frtol*fabs(f) && prered <= frtol*fabs(f)) { /* //printf("CONVERGENCE: FRTOL TEST SATISFIED\n"); */ search = 0; continue; } } free(g); free(xc); free(s); free(wa); } kernlab/src/brweight.h0000644000175100001440000000325412234152620014462 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/BoundedRangeWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef BRWEIGHT_H #define BRWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' Bounded Range weight class class BoundedRangeWeight : public I_WeightFactory { Real n; public: /// Constructor BoundedRangeWeight(const Real &n_=1): n(n_){} /// Destructor virtual ~BoundedRangeWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/Makevars.win0000644000175100001440000000006011470002335014754 0ustar hornikusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) kernlab/src/msufsort.h0000644000175100001440000006500412761213650014540 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. 
Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef MSUFSORT_H #define MSUFSORT_H //==================================================================// // // // v // // MSufSort Version 2.2 // // Author: Michael A Maniscalco // // Date: Nov. 3, 2005 // // // // Notes: // // // //==================================================================// #include "stdio.h" #include "stack.h" #include "introsort.h" #include "inductionsort.h" //==================================================================// // Test app defines: // //==================================================================// #define SHOW_PROGRESS // display progress during sort #define CHECK_SORT // verify that sorting is correct. // #define SORT_16_BIT_SYMBOLS // enable 16 bit symbols. #define USE_INDUCTION_SORTING // enable induction sorting feature. #define USE_ENHANCED_INDUCTION_SORTING // enable enhanced induction sorting feature. #define USE_TANDEM_REPEAT_SORTING // enable the tandem repeat sorting feature. //#define USE_ALT_SORT_ORDER // enable alternative sorting order #define ENDIAN_SWAP_16(value) ((value >> 8) | (value << 8)) #define SUFFIX_SORTED 0x80000000 // flag marks suffix as sorted. #define END_OF_CHAIN 0x3ffffffe // marks the end of a chain #define SORTED_BY_ENHANCED_INDUCTION 0x3fffffff // marks suffix which will be sorted by enhanced induction sort. #ifdef SORT_16_BIT_SYMBOLS #define SYMBOL_TYPE unsigned short #else #define SYMBOL_TYPE unsigned char #endif class MSufSort { public: MSufSort(); virtual ~MSufSort(); unsigned int Sort(SYMBOL_TYPE * source, unsigned int sourceLength); unsigned int GetElapsedSortTime(); unsigned int GetMemoryUsage(); unsigned int ISA(unsigned int index); bool VerifySort(); static void ReverseAltSortOrder(SYMBOL_TYPE * data, unsigned int nBytes); private: int CompareStrings(SYMBOL_TYPE * stringA, SYMBOL_TYPE * stringB, int len); bool IsTandemRepeat2(); bool IsTandemRepeat(); void PassTandemRepeat(); bool IsSortedByInduction(); bool IsSortedByEnhancedInduction(unsigned int suffixIndex); void ProcessSuffixesSortedByInduction(); // MarkSuffixAsSorted // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. void MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex); void MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex); void MarkSuffixAsSortedByEnhancedInductionSort(unsigned int suffixIndex); // PushNewChainsOntoStack: // Moves all new suffix chains onto the stack of partially sorted // suffixes. (makes them ready for further sub sorting). void PushNewChainsOntoStack(bool originalChains = false); void PushTandemBypassesOntoStack(); // OnSortedSuffix: // Event which is invoked with each sorted suffix at the time of // its sorting. virtual void OnSortedSuffix(unsigned int suffixIndex); // Initialize: // Initializes this object just before sorting begins. void Initialize(); // InitialSort: // This is the first sorting pass which makes the initial suffix // chains from the given source string. Pushes these chains onto // the stack for further sorting. void InitialSort(); // Value16: // Returns the two 8 bit symbols located // at positions N and N + 1 where N = the sourceIndex. unsigned short Value16(unsigned int sourceIndex); // ProcessChain: // Sorts the suffixes of a given chain by the next two symbols of // each suffix in the chain. This creates zero or more new suffix // chains with each sorted by two more symbols than the original // chain. 
Then pushes these new chains onto the chain stack for // further sorting. void ProcessNextChain(); void AddToSuffixChain(unsigned int suffixIndex, unsigned short suffixChain); void AddToSuffixChain(unsigned int firstSuffixIndex, unsigned int lastSuffixIndex, unsigned short suffixChain); void ProcessSuffixesSortedByEnhancedInduction(unsigned short suffixId); void ResolveTandemRepeatsNotSortedWithInduction(); unsigned int m_sortTime; Stack m_chainMatchLengthStack; Stack m_chainCountStack; Stack m_chainHeadStack; unsigned int m_endOfSuffixChain[0x10000]; unsigned int m_startOfSuffixChain[0x10000]; // m_source: // Address of the string to sort. SYMBOL_TYPE * m_source; // m_sourceLength: // The length of the string pointed to by m_source. unsigned int m_sourceLength; unsigned int m_sourceLengthMinusOne; // m_ISA: // The address of the working space which, when the sort is // completed, will contain the inverse suffix array for the // source string. unsigned int * m_ISA; // m_nextSortedSuffixValue: unsigned int m_nextSortedSuffixValue; // unsigned int m_numSortedSuffixes; // m_newChainIds // Array containing the valid chain numbers in m_newChain array. unsigned short m_newChainIds[0x10000]; // m_numNewChains: // The number of new suffix chain ids stored in m_numChainIds. unsigned int m_numNewChains; Stack m_suffixesSortedByInduction; unsigned int m_suffixMatchLength; unsigned int m_currentSuffixIndex; // m_firstSortedPosition: // For use with enhanced induction sorting. unsigned int m_firstSortedPosition[0x10000]; unsigned int m_firstSuffixByEnhancedInductionSort[0x10000]; unsigned int m_lastSuffixByEnhancedInductionSort[0x10000]; unsigned int m_currentSuffixChainId; #ifdef SHOW_PROGRESS // ShowProgress: // Update the progress indicator. void ShowProgress(); // m_nextProgressUpdate: // Indicates when to update the progress indicator. unsigned int m_nextProgressUpdate; // m_progressUpdateIncrement: // Indicates how many suffixes should be sorted before // incrementing the progress indicator. unsigned int m_progressUpdateIncrement; #endif // members used if alternate sorting order should be applied. SYMBOL_TYPE m_forwardAltSortOrder[256]; static SYMBOL_TYPE m_reverseAltSortOrder[256]; // for tandem repeat sorting bool m_hasTandemRepeatSortedByInduction; unsigned int m_firstUnsortedTandemRepeat; unsigned int m_lastUnsortedTandemRepeat; bool m_hasEvenLengthTandemRepeats; unsigned int m_tandemRepeatDepth; unsigned int m_firstSortedTandemRepeat; unsigned int m_lastSortedTandemRepeat; unsigned int m_tandemRepeatLength; }; //inline unsigned short MSufSort::Value16(unsigned int sourceIndex) //{ // return (sourceIndex < m_sourceLengthMinusOne) ? *(unsigned short *)(m_source + sourceIndex) : m_source[sourceIndex]; //} // fix by Brian Ripley inline unsigned short MSufSort::Value16(unsigned int sourceIndex) { union {unsigned short u; unsigned char b[2];} u16; u16.b[0] = m_source[sourceIndex]; u16.b[1] = (sourceIndex < m_sourceLengthMinusOne) ? 
m_source[sourceIndex + 1] : 0; return u16.u; } inline bool MSufSort::IsSortedByInduction() { unsigned int n = m_currentSuffixIndex + m_suffixMatchLength - 1; #ifndef USE_INDUCTION_SORTING if (n < m_sourceLengthMinusOne) return false; #endif if ((m_ISA[n] & SUFFIX_SORTED) && ((m_ISA[n] & 0x3fffffff) < m_nextSortedSuffixValue)) { InductionSortObject i(0, m_ISA[n], m_currentSuffixIndex); m_suffixesSortedByInduction.Push(i); } else if ((m_ISA[n + 1] & SUFFIX_SORTED) && ((m_ISA[n + 1] & 0x3fffffff) < m_nextSortedSuffixValue)) { InductionSortObject i(1, m_ISA[n + 1], m_currentSuffixIndex); m_suffixesSortedByInduction.Push(i); } else return false; return true; } inline bool MSufSort::IsSortedByEnhancedInduction(unsigned int suffixIndex) { if (suffixIndex > 0) if (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION) return true; return false; } inline bool MSufSort::IsTandemRepeat() { #ifndef USE_TANDEM_REPEAT_SORTING return false; #else if ((!m_tandemRepeatDepth) && (m_currentSuffixIndex + m_suffixMatchLength) == (m_ISA[m_currentSuffixIndex] + 1)) return true; #ifndef SORT_16_BIT_SYMBOLS if ((!m_tandemRepeatDepth) && ((m_currentSuffixIndex + m_suffixMatchLength) == (m_ISA[m_currentSuffixIndex]))) { m_hasEvenLengthTandemRepeats = true; return false; } #endif return false; #endif } inline void MSufSort::PassTandemRepeat() { unsigned int nextIndex; unsigned int lastIndex; // unsigned int firstIndex = m_currentSuffixIndex; while ((m_currentSuffixIndex + m_suffixMatchLength) == ((nextIndex = m_ISA[m_currentSuffixIndex]) + 1)) { lastIndex = m_currentSuffixIndex; m_currentSuffixIndex = nextIndex; } if (IsSortedByInduction()) { m_hasTandemRepeatSortedByInduction = true; m_currentSuffixIndex = m_ISA[m_currentSuffixIndex]; } else { if (m_firstUnsortedTandemRepeat == END_OF_CHAIN) m_firstUnsortedTandemRepeat = m_lastUnsortedTandemRepeat = lastIndex; else m_lastUnsortedTandemRepeat = (m_ISA[m_lastUnsortedTandemRepeat] = lastIndex); } } inline void MSufSort::PushNewChainsOntoStack(bool originalChains) { // Moves all new suffix chains onto the stack of partially sorted // suffixes. (makes them ready for further sub sorting). #ifdef SORT_16_BIT_SYMBOLS unsigned int newSuffixMatchLength = m_suffixMatchLength + 1; #else unsigned int newSuffixMatchLength = m_suffixMatchLength + 2; #endif if (m_numNewChains) { if (m_hasEvenLengthTandemRepeats) { m_chainCountStack.Push(m_numNewChains - 1); m_chainMatchLengthStack.Push(newSuffixMatchLength); m_chainCountStack.Push(1); m_chainMatchLengthStack.Push(newSuffixMatchLength - 1); } else { m_chainCountStack.Push(m_numNewChains); m_chainMatchLengthStack.Push(newSuffixMatchLength); } if (m_numNewChains > 1) IntroSort(m_newChainIds, m_numNewChains); while (m_numNewChains) { unsigned short chainId = m_newChainIds[--m_numNewChains]; chainId = ENDIAN_SWAP_16(chainId); // unsigned int n = m_startOfSuffixChain[chainId]; m_chainHeadStack.Push(m_startOfSuffixChain[chainId]); m_startOfSuffixChain[chainId] = END_OF_CHAIN; m_ISA[m_endOfSuffixChain[chainId]] = END_OF_CHAIN; } } m_hasEvenLengthTandemRepeats = false; if (m_firstUnsortedTandemRepeat != END_OF_CHAIN) { // Tandem repeats with a terminating suffix that did not get // sorted via induction has occurred (at least once). // We have a suffix chain (indicated by m_firstTandemRepeatWithoutSuffix) // of the suffix in each tandem repeat which immediately proceeded the // terminating suffix in each chain. We want to sort them relative to // each other and then process the tandem repeats. 
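// (Clarifying note on the mechanism below, as implemented in this function:
// the repeat chain is pushed with a match length of (2 * m_suffixMatchLength - 1)
// and m_tandemRepeatDepth is raised to 1, so MarkSuffixAsSorted() will link
// these suffixes together in sorted order rather than assign final ISA values;
// the resulting linked list is then unwound by
// ResolveTandemRepeatsNotSortedWithInduction().)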
unsigned int tandemRepeatLength = m_suffixMatchLength - 1; unsigned int numChains = m_chainHeadStack.Count(); m_chainHeadStack.Push(m_firstUnsortedTandemRepeat); m_chainCountStack.Push(1); m_chainMatchLengthStack.Push((m_suffixMatchLength << 1) - 1); m_ISA[m_lastUnsortedTandemRepeat] = END_OF_CHAIN; m_firstUnsortedTandemRepeat = END_OF_CHAIN; m_tandemRepeatDepth = 1; while (m_chainHeadStack.Count() > numChains) ProcessNextChain(); m_suffixMatchLength = tandemRepeatLength + 1; ResolveTandemRepeatsNotSortedWithInduction(); m_tandemRepeatDepth = 0; } } inline void MSufSort::AddToSuffixChain(unsigned int suffixIndex, unsigned short suffixChain) { if (m_startOfSuffixChain[suffixChain] == END_OF_CHAIN) { m_endOfSuffixChain[suffixChain] = m_startOfSuffixChain[suffixChain] = suffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(suffixChain); } else m_endOfSuffixChain[suffixChain] = m_ISA[m_endOfSuffixChain[suffixChain]] = suffixIndex; } inline void MSufSort::AddToSuffixChain(unsigned int firstSuffixIndex, unsigned int lastSuffixIndex, unsigned short suffixChain) { if (m_startOfSuffixChain[suffixChain] == END_OF_CHAIN) { m_startOfSuffixChain[suffixChain] = firstSuffixIndex; m_endOfSuffixChain[suffixChain] = lastSuffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(suffixChain); } else { m_ISA[m_endOfSuffixChain[suffixChain]] = firstSuffixIndex; m_endOfSuffixChain[suffixChain] = lastSuffixIndex; } } inline void MSufSort::OnSortedSuffix(unsigned int suffixIndex) { // Event which is invoked with each sorted suffix at the time of // its sorting. m_numSortedSuffixes++; #ifdef SHOW_PROGRESS if (m_numSortedSuffixes >= m_nextProgressUpdate) { m_nextProgressUpdate += m_progressUpdateIncrement; ShowProgress(); } #endif } #ifdef SORT_16_BIT_SYMBOLS inline void MSufSort::MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; symbol = ENDIAN_SWAP_16(symbol); if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } } #endif } inline void MSufSort::MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. 
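// (Note: this variant is driven by ProcessSuffixesSortedByEnhancedInduction(),
// which passes m_firstSortedPosition[symbol] as the running counter. Unlike
// MarkSuffixAsSorted() above, it does not assign a position to the preceding
// suffix itself; it only queues that suffix for later enhanced-induction
// processing.)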
if (m_tandemRepeatDepth)
{
    // we are processing a list of suffixes which were the second to last in tandem repeats
    // that were not terminated via induction. These suffixes are not actually to be
    // marked as sorted yet. Instead, they are to be linked together in sorted order.
    if (m_firstSortedTandemRepeat == END_OF_CHAIN)
        m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex;
    else
        m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex);
    return;
}
m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED);
#ifdef SHOW_PROGRESS
OnSortedSuffix(suffixIndex);
#endif
#ifdef USE_ENHANCED_INDUCTION_SORTING
if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION))
{
    unsigned short symbol = Value16(suffixIndex);
    symbol = ENDIAN_SWAP_16(symbol);
    suffixIndex--;
    if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN)
        m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex;
    else
    {
        m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex;
        m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex;
    }
}
#endif
}
#else
inline void MSufSort::MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex)
{
    // Sets the final inverse suffix array value for a given suffix.
    // Also invokes the OnSortedSuffix member function.
    if (m_tandemRepeatDepth)
    {
        // we are processing a list of suffixes which were the second to last in tandem repeats
        // that were not terminated via induction. These suffixes are not actually to be
        // marked as sorted yet. Instead, they are to be linked together in sorted order.
        if (m_firstSortedTandemRepeat == END_OF_CHAIN)
            m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex;
        else
            m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex);
        return;
    }
    m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED);
    #ifdef SHOW_PROGRESS
    OnSortedSuffix(suffixIndex);
    #endif
    #ifdef USE_ENHANCED_INDUCTION_SORTING
    if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION))
    {
        suffixIndex--;
        unsigned short symbol = Value16(suffixIndex);
        m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED);
        #ifdef SHOW_PROGRESS
        OnSortedSuffix(suffixIndex);
        #endif
        if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION))
        {
            suffixIndex--;
            unsigned short symbol2 = symbol;
            symbol = Value16(suffixIndex);
            m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED);
            #ifdef SHOW_PROGRESS
            OnSortedSuffix(suffixIndex);
            #endif
            if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION))
            {
                if (m_source[suffixIndex] < m_source[suffixIndex + 1])
                    symbol2 = ENDIAN_SWAP_16(symbol);
                else
                    symbol2 = ENDIAN_SWAP_16(symbol2);
                suffixIndex--;
                if (m_firstSuffixByEnhancedInductionSort[symbol2] == END_OF_CHAIN)
                    m_firstSuffixByEnhancedInductionSort[symbol2] = m_lastSuffixByEnhancedInductionSort[symbol2] = suffixIndex;
                else
                {
                    m_ISA[m_lastSuffixByEnhancedInductionSort[symbol2]] = suffixIndex;
                    m_lastSuffixByEnhancedInductionSort[symbol2] = suffixIndex;
                }
            }
        }
    }
    #endif
}
inline void MSufSort::MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex)
{
    // Sets the final inverse suffix array value for a given suffix.
    // Also invokes the OnSortedSuffix member function.
    if (m_tandemRepeatDepth)
    {
        // we are processing a list of suffixes which were the second to last in tandem repeats
        // that were not terminated via induction. These suffixes are not actually to be
        // marked as sorted yet.
Instead, they are to be linked together in sorted order. if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { unsigned short symbol; if (m_source[suffixIndex] < m_source[suffixIndex + 1]) symbol = Value16(suffixIndex); else symbol = Value16(suffixIndex + 1); symbol = ENDIAN_SWAP_16(symbol); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } #endif } #endif inline void MSufSort::ProcessNextChain() { // Sorts the suffixes of a given chain by the next two symbols of // each suffix in the chain. This creates zero or more new suffix // chains with each sorted by two more symbols than the original // chain. Then pushes these new chains onto the chain stack for // further sorting. while (--m_chainCountStack.Top() < 0) { m_chainCountStack.Pop(); m_chainMatchLengthStack.Pop(); } m_suffixMatchLength = m_chainMatchLengthStack.Top(); m_currentSuffixIndex = m_chainHeadStack.Pop(); #ifdef USE_ENHANCED_INDUCTION_SORTING if (m_chainMatchLengthStack.Count() == 1) { // one of the original buckets from InitialSort(). This is important // when enhanced induction sorting is enabled. unsigned short chainId = Value16(m_currentSuffixIndex); unsigned short temp = chainId; chainId = ENDIAN_SWAP_16(chainId); while (m_currentSuffixChainId <= chainId) ProcessSuffixesSortedByEnhancedInduction(m_currentSuffixChainId++); m_nextSortedSuffixValue = m_firstSortedPosition[temp]; } #endif if (m_ISA[m_currentSuffixIndex] == END_OF_CHAIN) MarkSuffixAsSorted(m_currentSuffixIndex, m_nextSortedSuffixValue); // only one suffix in bucket so it is sorted. 
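/* Otherwise the chain holds more than one suffix: walk it, and for each
   suffix either bypass a tandem repeat, hand it to induction sorting, or
   append it to a new chain keyed by the next two symbols (Value16() at
   offset m_suffixMatchLength), so each new chain is sorted by two more
   symbols than the one just popped. */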
else { do { if (IsTandemRepeat()) PassTandemRepeat(); else if ((m_currentSuffixIndex != END_OF_CHAIN) && (IsSortedByInduction())) m_currentSuffixIndex = m_ISA[m_currentSuffixIndex]; else { unsigned int firstSuffixIndex = m_currentSuffixIndex; unsigned int lastSuffixIndex = m_currentSuffixIndex; unsigned short targetSymbol = Value16(m_currentSuffixIndex + m_suffixMatchLength); unsigned int nextSuffix; do { nextSuffix = m_ISA[lastSuffixIndex = m_currentSuffixIndex]; if ((m_currentSuffixIndex = nextSuffix) == END_OF_CHAIN) break; else if (IsTandemRepeat()) { PassTandemRepeat(); break; } else if (IsSortedByInduction()) { m_currentSuffixIndex = m_ISA[nextSuffix]; break; } } while (Value16(m_currentSuffixIndex + m_suffixMatchLength) == targetSymbol); AddToSuffixChain(firstSuffixIndex, lastSuffixIndex, targetSymbol); } } while (m_currentSuffixIndex != END_OF_CHAIN); ProcessSuffixesSortedByInduction(); PushNewChainsOntoStack(); } } inline void MSufSort::ProcessSuffixesSortedByInduction() { unsigned int numSuffixes = m_suffixesSortedByInduction.Count(); if (numSuffixes) { InductionSortObject * objects = m_suffixesSortedByInduction.m_stack; if (numSuffixes > 1) IntroSort(objects, numSuffixes); if (m_hasTandemRepeatSortedByInduction) { // During the last pass some suffixes which were sorted via induction were also // determined to be the terminal suffix in a tandem repeat. So when we mark // the suffixes as sorted (where were sorted via induction) we make chain together // the preceding suffix in the tandem repeat (if there is one). unsigned int firstTandemRepeatIndex = END_OF_CHAIN; unsigned int lastTandemRepeatIndex = END_OF_CHAIN; unsigned int tandemRepeatLength = m_suffixMatchLength - 1; m_hasTandemRepeatSortedByInduction = false; for (unsigned int i = 0; i < numSuffixes; i++) { unsigned int suffixIndex = (objects[i].m_sortValue[1] & 0x3fffffff); if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { // this suffix was a terminating suffix in a tandem repeat. // add the preceding suffix in the tandem repeat to the list. if (firstTandemRepeatIndex == END_OF_CHAIN) firstTandemRepeatIndex = lastTandemRepeatIndex = (suffixIndex - tandemRepeatLength); else lastTandemRepeatIndex = (m_ISA[lastTandemRepeatIndex] = (suffixIndex - tandemRepeatLength)); } MarkSuffixAsSorted(suffixIndex, m_nextSortedSuffixValue); } // now process each suffix in the tandem repeat list making each as sorted. // build a new list for tandem repeats which preceded each in the list until there are // no preceding tandem suffix for any suffix in the list. while (firstTandemRepeatIndex != END_OF_CHAIN) { m_ISA[lastTandemRepeatIndex] = END_OF_CHAIN; unsigned int suffixIndex = firstTandemRepeatIndex; firstTandemRepeatIndex = END_OF_CHAIN; while (suffixIndex != END_OF_CHAIN) { if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { // this suffix was a terminating suffix in a tandem repeat. // add the preceding suffix in the tandem repeat to the list. if (firstTandemRepeatIndex == END_OF_CHAIN) firstTandemRepeatIndex = lastTandemRepeatIndex = (suffixIndex - tandemRepeatLength); else lastTandemRepeatIndex = (m_ISA[lastTandemRepeatIndex] = (suffixIndex - tandemRepeatLength)); } unsigned int nextSuffix = m_ISA[suffixIndex]; MarkSuffixAsSorted(suffixIndex, m_nextSortedSuffixValue); suffixIndex = nextSuffix; } } // finished. } else { // This is the typical branch on the condition. 
There were no tandem repeats // encountered during the last chain that were terminated with a suffix that // was sorted via induction. In this case we just mark the suffixes as sorted // and we are done. for (unsigned int i = 0; i < numSuffixes; i++) MarkSuffixAsSorted(objects[i].m_sortValue[1] & 0x3fffffff, m_nextSortedSuffixValue); } m_suffixesSortedByInduction.Clear(); } } inline void MSufSort::ProcessSuffixesSortedByEnhancedInduction(unsigned short suffixId) { // if (m_firstSuffixByEnhancedInductionSort[suffixId] != END_OF_CHAIN) { unsigned int currentSuffixIndex = m_firstSuffixByEnhancedInductionSort[suffixId]; unsigned int lastSuffixIndex = m_lastSuffixByEnhancedInductionSort[suffixId]; m_firstSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; m_lastSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; do { unsigned short symbol = Value16(currentSuffixIndex); unsigned int nextIndex = m_ISA[currentSuffixIndex]; MarkSuffixAsSorted2(currentSuffixIndex, m_firstSortedPosition[symbol]); if (currentSuffixIndex == lastSuffixIndex) { if (m_firstSuffixByEnhancedInductionSort[suffixId] == END_OF_CHAIN) return; currentSuffixIndex = m_firstSuffixByEnhancedInductionSort[suffixId]; lastSuffixIndex = m_lastSuffixByEnhancedInductionSort[suffixId]; m_firstSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; m_lastSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; } else currentSuffixIndex = nextIndex; } while (true); } } #ifdef SHOW_PROGRESS inline void MSufSort::ShowProgress() { // Update the progress indicator. //double p = ((double)(m_numSortedSuffixes & 0x3fffffff) / m_sourceLength) * 100; // printf("Progress: %.2f%% %c", p, 13); } #endif #endif kernlab/src/dprsrch.c0000644000175100001440000001041611304023134014277 0ustar hornikusers#include #include #include extern double mymin(double, double); extern double mymax(double, double); extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *);*/ /*extern int daxpy_(int *, double *, double *, int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern void dbreakpt(int, double *, double *, double *, double *, int *, double *, double *); extern void dgpstep(int, double *, double *, double *, double, double *, double *); void dprsrch(int n, double *x, double *xl, double *xu, double *A, double *g, double *w) { /* c ********** c c Subroutine dprsrch c c This subroutine uses a projected search to compute a step c that satisfies a sufficient decrease condition for the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c where A is a symmetric matrix and g is a vector. Given the c parameter alpha, the step is c c s[alpha] = P[x + alpha*w] - x, c c where w is the search direction and P the projection onto the c n-dimensional interval [xl,xu]. The final step s = s[alpha] c satisfies the sufficient decrease condition c c q(s) <= mu_0*(g'*s), c c where mu_0 is a constant in (0,1). c c The search direction w must be a descent direction for the c quadratic q at x such that the quadratic is decreasing c in the ray x + alpha*w for 0 <= alpha <= 1. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is set to the final point P[x + alpha*w]. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. 
c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the vector g. c On exit g is unchanged. c c w is a double prevision array of dimension n. c On entry w specifies the search direction. c On exit w is the step s[alpha]. c c ********** */ double one = 1, zero = 0; /* Constant that defines sufficient decrease. */ /* Interpolation factor. */ double mu0 = 0.01, interpf = 0.5; double *wa1 = (double *) xmalloc(sizeof(double)*n); double *wa2 = (double *) xmalloc(sizeof(double)*n); /* Set the initial alpha = 1 because the quadratic function is decreasing in the ray x + alpha*w for 0 <= alpha <= 1 */ double alpha = 1, brptmin, brptmax, gts, q; int search = 1, nbrpt, nsteps = 0, i, inc = 1; /* Find the smallest break-point on the ray x + alpha*w. */ dbreakpt(n, x, xl, xu, w, &nbrpt, &brptmin, &brptmax); /* Reduce alpha until the sufficient decrease condition is satisfied or x + alpha*w is feasible. */ while (search && alpha > brptmin) { /* Calculate P[x + alpha*w] - x and check the sufficient decrease condition. */ nsteps++; dgpstep(n, x, xl, xu, alpha, w, wa1); F77_CALL(dsymv)("U", &n, &one, A, &n, wa1, &inc, &zero, wa2, &inc); gts = F77_CALL(ddot)(&n, g, &inc, wa1, &inc); q = 0.5*F77_CALL(ddot)(&n, wa1, &inc, wa2, &inc) + gts; if (q <= mu0*gts) search = 0; else /* This is a crude interpolation procedure that will be replaced in future versions of the code. */ alpha *= interpf; } /* Force at least one more constraint to be added to the active set if alpha < brptmin and the full step is not successful. There is sufficient decrease because the quadratic function is decreasing in the ray x + alpha*w for 0 <= alpha <= 1. */ if (alpha < 1 && alpha < brptmin) alpha = brptmin; /* Compute the final iterate and step. */ dgpstep(n, x, xl, xu, alpha, w, wa1); F77_CALL(daxpy)(&n, &alpha, w, &inc, x, &inc); for (i=0;i * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/StringKernel.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 10 Aug 2006 // 11 Oct 2006 #ifndef STRINGKERNEL_CPP #define STRINGKERNEL_CPP #include #include #include #include #include #include #include #include #include "stringkernel.h" StringKernel::StringKernel(): esa(0), weigher(0), val(0), lvs(0) {} /** * Construct string kernel given constructed enhanced suffix array. * * \param esa_ - ESA instance. */ StringKernel::StringKernel(ESA *esa_, int weightfn, Real param, int verb): esa(esa_), val(new Real[esa_->size + 1]), lvs(0), _verb(verb) { switch (weightfn) { case CONSTANT: weigher = new ConstantWeight(); break; case EXPDECAY: weigher = new ExpDecayWeight(param); break; case KSPECTRUM: weigher = new KSpectrumWeight(param); break; case BOUNDRANGE: weigher = new BoundedRangeWeight(param); break; default: weigher = new ConstantWeight(); //int nothing = 0; } } /** * Construct string kernel when given only text and its length. * * \param text - (IN) The text which SuffixArray and StringKernel correspond to. * \param text_length - (IN) The length of #_text#. * \param verb - (IN) Verbosity level. 
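 * \param weightfn - (IN) Substring weighting function to use
 *                        (CONSTANT, EXPDECAY, KSPECTRUM or BOUNDRANGE).
 * \param param - (IN) Parameter for the chosen weighting function.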
*/ StringKernel::StringKernel(const UInt32 &size, SYMBOL *text, int weightfn, Real param, int verb): lvs(0), _verb(verb) { // Build ESA. esa = new ESA(size, text, verb); // Allocate memory space for #val# val = new Real[esa->size + 1]; // Instantiate weigher. switch (weightfn) { case CONSTANT: weigher = new ConstantWeight(); break; case EXPDECAY: weigher = new ExpDecayWeight(param); break; case KSPECTRUM: weigher = new KSpectrumWeight(param); break; case BOUNDRANGE: weigher = new BoundedRangeWeight(param); break; default: weigher = new ConstantWeight(); //int nothing = 0; } } /** * StringKernel destructor. * */ StringKernel::~StringKernel() { //' Delete objects and release allocated memory space. if (esa) { delete esa; esa = 0; } if (val) { delete [] val; val = 0; } if (lvs) { delete [] lvs; lvs = 0; } if (weigher) { delete weigher; weigher = 0; } } /** * An Iterative auxiliary function used in PrecomputeVal(). * * Note: Every lcp-interval can be represented by its first l-index. * Hence, 'val' is stored in val[] at the index := first l-index. * * Pre: val[] is initialised to 0. * * @param left Left bound of current interval * @param right Right bound of current interval */ void StringKernel::IterativeCompute(const UInt32 &left, const UInt32 &right) { //std::cout << "In IterativeCompute() " << std::endl; //' Variables queue > q; vector > childlist; pair p; UInt32 lb = 0; UInt32 rb = 0; UInt32 floor_len = 0; UInt32 x_len = 0; Real cur_val = 0.0; Real edge_weight = 0.0; //' Step 1: At root, 0-[0..size-1]. Store all non-single child-intervals onto #q#. lb = left; //' Should be equal to 0. rb = right; //' Should be equal to size-1. esa->GetChildIntervals(lb, rb, childlist); for (UInt32 jj = 0; jj < childlist.size(); jj++) q.push(childlist[jj]); //' Step 2: Do breadth-first traversal. For every interval, compute val and add //' it to all its non-singleton child-intervals' val-entries in val[]. //' Start with child-interval [i..j] of 0-[0..size-1]. //' assert(j != size-1) while (!q.empty()) { //' Step 2.1: Get an interval from queue, #q#. p = q.front(); q.pop(); //' step 2.2: Get the lcp of floor interval. UInt32 a = 0, b = 0; a = esa->lcptab[p.first]; //svnvish: BUGBUG // Glorious hack. We have to remove it later. // This gives the lcp of parent interval if (p.second < esa->size - 1) { b = esa->lcptab[p.second + 1]; } else { b = 0; } floor_len = (a > b) ? a : b; //' Step 2.3: Get the lcp of current interval. esa->GetLcp(p.first, p.second, x_len); //' Step 2.4: Compute val of current interval. weigher->ComputeWeight(floor_len, x_len, edge_weight); cur_val = edge_weight * (lvs[p.second + 1] - lvs[p.first]); //' Step 2.5: Add #cur_val# to val[]. UInt32 firstlIndex1 = 0; esa->childtab.l_idx(p.first, p.second, firstlIndex1); val[firstlIndex1] += cur_val; // std::cout << "p.first:"<GetChildIntervals(p.first, p.second, childlist); //' Step 2.7: (a) Add #cur_val# to child-intervals' val-entries in val[]. //' (b) Push child-interval onto #q#. for (UInt32 kk = 0; kk < childlist.size(); kk++) { //' (a) UInt32 firstlIndex2 = 0; pair tmp_p = childlist[kk]; if (esa->text[esa->suftab[tmp_p.first]] == SENTINEL) continue; esa->childtab.l_idx(tmp_p.first, tmp_p.second, firstlIndex2); // assert( val[firstlIndex2] == 0 ); val[firstlIndex2] = val[firstlIndex1]; // cur_val; //' (b) q.push(make_pair(tmp_p.first, tmp_p.second)); } } //std::cout << "Out IterativeCompute() " << std::endl; } /** * Precomputation of val(t) of string kernel. 
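 * (These cached values are what Compute_K() consumes: for each matched
 *  substring it adds the precomputed val[] entry of the matched interval
 *  plus a single edge-weighted leaf term, instead of re-walking the
 *  suffix tree.)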
* Observation :Every internal node of a suffix tree can be represented by at * least one index of the corresponding lcp array. So, the val * of a node is stored in val[] at the index corresponding to that of * the fist representative lcp value in lcp[]. */ void StringKernel::PrecomputeVal() { //' Memory space requirement check. assert(val != 0); //' Initialise all val entries to zero! memset(val, 0, sizeof(Real)*esa->size + 1); //' Start iterative precomputation of val[] IterativeCompute(0, esa->size - 1); } /** * Compute k(text,x) by performing Chang and Lawler's matching statistics collection * algorithm on the enhanced suffix array. * * \param x - (IN) The input string which is to be evaluated together with * the text in esa. * \param x_len - (IN) The length of #x#. * \param value - (IN) The value of k(x,x'). */ void StringKernel::Compute_K(SYMBOL *x, const UInt32 &x_len, Real &value) { //' Variables UInt32 floor_i = 0; UInt32 floor_j = 0; UInt32 i = 0; UInt32 j = 0; UInt32 lb = 0; UInt32 rb = 0; UInt32 matched_len = 0; UInt32 offset = 0; UInt32 floor_len = 0; UInt32 firstlIndex = 0; Real edge_weight = 0.0; //' Initialisation value = 0.0; lb = 0; rb = esa->size - 1; //' for each suffix, xprime[k..xprime_len-1], find longest match in text for (UInt32 k = 0; k < x_len; k++) { //' Step 1: Matching esa->ExactSuffixMatch(lb, rb, offset, &x[k], x_len - k, i, j, matched_len, floor_i, floor_j, floor_len); //' Step 2: Get suffix link for [floor_i..floor_j] esa->GetSuflink(floor_i, floor_j, lb, rb); assert((floor_j - floor_i) <= (rb - lb)); //' Range check //' Step 3: Compute contribution of this matched substring esa->childtab.l_idx(floor_i, floor_j, firstlIndex); assert(firstlIndex > floor_i && firstlIndex <= floor_j); assert(floor_len <= matched_len); weigher->ComputeWeight(floor_len, matched_len, edge_weight); value += val[firstlIndex] + edge_weight * (lvs[j + 1] - lvs[i]); // std::cout << "i:"<size); //' Allocate memory space for lvs[] lvs = new (nothrow) Real[esa->size + 1]; assert(lvs); //' Assign leaf weight to lvs element according to its position in text. for (UInt32 j = 0; j < esa->size; j++) { pos = esa->suftab[j]; UInt32 *p = upper_bound(clen, clen + m, pos); //' O(log n) lvs[j + 1] = leafWeight[p - clen]; } //' Compute cumulative lvs[]. To be used in matching statistics computation later. lvs[0] = 0.0; partial_sum(lvs, lvs + esa->size + 1, lvs); //chteo: [101006] delete [] clen; clen = 0; } /** * Set lvs[i] = i, for i = 0 to esa->size * Memory space for lvs[] will be allocated. */ void StringKernel::Set_Lvs() { //' Clean up previous lvs, if any. if (lvs) { delete lvs; lvs = 0; } //' Allocate memory space for lvs[] lvs = new (nothrow) Real[esa->size + 1]; //' Check if memory correctly allocated. assert(lvs != 0); //' Range := [0..esa->size] UInt32 localsize = esa->size; for (UInt32 i = 0; i <= localsize; i++) lvs[i] = i; } #endif #include #include #include extern "C" { SEXP stringtv(SEXP rtext, // text document SEXP ltext, // list or vector of text documents to compute kvalues against SEXP nltext, // number of text documents in ltext SEXP vnchar, // number of characters in text SEXP vnlchar, // characters per document in ltext SEXP stype, // type of kernel SEXP param) // parameter for kernel { // R interface for text and list of text computation. Should return a vector of computed kernel values. 
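// (Contract, as inferred from the code below: for each of the nltext
// documents in ltext, alpha[i] is set to k(text, ltext[i]) computed with
// the suffix-array string kernel, and the numeric vector alpha of length
// nltext is returned to R.)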
// Construct ESASK UInt32 text_size = *INTEGER(vnchar); int number_ltext = *INTEGER(nltext); unsigned int *ltext_size = (unsigned int *) malloc (sizeof(unsigned int) * number_ltext); memcpy(ltext_size, INTEGER(vnlchar), number_ltext*sizeof(int)); int weightfn = *INTEGER(stype); const char *text = CHAR(STRING_ELT(rtext,0)); Real kparam = *REAL(param); double kVal; SEXP alpha; PROTECT(alpha = allocVector(REALSXP, number_ltext)); // Check if stringlength reported from R is correct if(strlen(text)!= text_size) text_size= strlen(text); StringKernel sk(text_size, (SYMBOL*)text, (weightfn - 1), kparam, 0); sk.Set_Lvs(); sk.PrecomputeVal(); for (int i=0; i * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/DataType.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef DATATYPE_H #define DATATYPE_H // #define UInt32 unsigned int // #define UInt64 unsigned long long // #define Byte1 unsigned char // #define Byte2 unsigned short // #define Real double typedef unsigned int UInt32; // Seems that even using __extension__ g++ 4.6 will complain that // ISO C++ 1998 does not support 'long long' ... /* #if defined __GNUC__ && __GNUC__ >= 2 __extension__ typedef unsigned long long UInt64; #else typedef unsigned long long UInt64; #endif */ #include typedef uint64_t UInt64; typedef unsigned char Byte1; typedef unsigned short Byte2; typedef double Real; // #define SENTINEL '\n' // #define SENTINEL2 '\0' const char SENTINEL = '\n'; const char SENTINEL2 = '\0'; #ifndef UNICODE // # define SYMBOL Byte1 typedef Byte1 SYMBOL; #else // # define SYMBOL Byte2 typedef Byte2 SYMBOL; #endif #endif kernlab/src/inductionsort.cpp0000644000175100001440000000264612234152620016112 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #include "inductionsort.h" InductionSortObject::InductionSortObject(unsigned int inductionPosition, unsigned int inductionValue, unsigned int suffixIndex) { // sort value is 64 bits long. // bits are ... // 63 - 60: induction position (0 - 15) // 59 - 29: induction value at induction position (0 - (2^30 -1)) // 28 - 0: suffix index for the suffix sorted by induction (0 - (2^30) - 1) m_sortValue[0] = inductionPosition << 28; m_sortValue[0] |= ((inductionValue & 0x3fffffff) >> 2); m_sortValue[1] = (inductionValue << 30); m_sortValue[1] |= suffixIndex; } kernlab/src/wkasailcp.cpp0000644000175100001440000000452112234152620015156 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_kasai_lcp.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef W_KASAI_LCP_CPP #define W_KASAI_LCP_CPP #include "wkasailcp.h" #include /** * Compute LCP array. Algorithm adapted from Manzini's SWAT2004 paper. * Modification: array indexing changed from 1-based to 0-based. * * \param text - (IN) The text which corresponds to SA. * \param len - (IN) Length of text. * \param sa - (IN) Suffix array. * \param lcp - (OUT) Computed LCP array. */ ErrorCode W_kasai_lcp::ComputeLCP(const SYMBOL *text, const UInt32 &len, const UInt32 *sa, LCP& lcp) { //chteo: [111006:0141] //std::vector isa(len); UInt32 *isa = new UInt32[len]; //' Step 1: Compute inverse suffix array for(UInt32 i=0; i0) h--; } //chteo: [111006:0141] delete [] isa; isa = 0; return NOERROR; } #endif kernlab/src/introsort.h0000644000175100001440000001560012234152620014710 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef TERNARY_INTRO_SORT_H #define TERNARY_INTRO_SORT_H //======================================================================// // Class: IntroSort // // // // Template based implementation of Introspective sorting algorithm // // using a ternary quicksort. // // // // Author: M.A. Maniscalco // // Date: January 20, 2005 // // // //======================================================================// // *** COMPILER WARNING DISABLED *** // Disable a warning which appears in MSVC // "conversion from '__w64 int' to ''" // Just plain annoying ... Restored at end of this file. 
#ifdef WIN32 #pragma warning (disable : 4244) #endif #define MIN_LENGTH_FOR_QUICKSORT 32 #define MAX_DEPTH_BEFORE_HEAPSORT 128 //===================================================================== // IntroSort class declaration // Notes: Any object used with this class must implement the following // the operators: <=, >=, == //===================================================================== template void IntroSort(T * array, unsigned int count); template void Partition(T * left, unsigned int count, unsigned int depth = 0); template T SelectPivot(T value1, T value2, T value3); template void Swap(T * valueA, T * valueB); template void InsertionSort(T * array, unsigned int count); template void HeapSort(T * array, int length); template void HeapSort(T * array, int k, int N); template inline void IntroSort(T * array, unsigned int count) { // Public method used to invoke the sort. // Call quick sort partition method if there are enough // elements to warrant it or insertion sort otherwise. if (count >= MIN_LENGTH_FOR_QUICKSORT) Partition(array, count); InsertionSort(array, count); } template inline void Swap(T * valueA, T * valueB) { // do the ol' "switch-a-me-do" on two values. T temp = *valueA; *valueA = *valueB; *valueB = temp; } template inline T SelectPivot(T value1, T value2, T value3) { // middle of three method. if (value1 < value2) return ((value2 < value3) ? value2 : (value1 < value3) ? value3 : value1); return ((value1 < value3) ? value1 : (value2 < value3) ? value3 : value2); } template inline void Partition(T * left, unsigned int count, unsigned int depth) { if (++depth > MAX_DEPTH_BEFORE_HEAPSORT) { // If enough recursion has happened then we bail to heap sort since it looks // as if we are experiencing a 'worst case' for quick sort. This should not // happen very often at all. HeapSort(left, count); return; } T * right = left + count - 1; T * startingLeft = left; T * startingRight = right; T * equalLeft = left; T * equalRight = right; // select the pivot value. T pivot = SelectPivot(left[0], right[0], left[((right - left) >> 1)]); // do three way partitioning. do { while ((left < right) && (*left <= pivot)) if (*(left++) == pivot) Swap(equalLeft++, left - 1); // equal to pivot value. move to far left. while ((left < right) && (*right >= pivot)) if (*(right--) == pivot) Swap(equalRight--, right + 1); // equal to pivot value. move to far right. if (left >= right) { if (left == right) { if (*left >= pivot) left--; if (*right <= pivot) right++; } else { left--; right++; } break; // done partitioning } // left and right are ready for swaping Swap(left++, right--); } while (true); // move values that were equal to pivot from the far left into the middle. // these values are now placed in their final sorted position. if (equalLeft > startingLeft) while (equalLeft > startingLeft) Swap(--equalLeft, left--); // move values that were equal to pivot from the far right into the middle. // these values are now placed in their final sorted position. if (equalRight < startingRight) while (equalRight < startingRight) Swap(++equalRight, right++); // Calculate new partition sizes ... unsigned int leftSize = left - startingLeft + 1; unsigned int rightSize = startingRight - right + 1; // Partition left (less than pivot) if there are enough values to warrant it // otherwise do insertion sort on the values. 
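// (Design note: recursing only on partitions with at least
// MIN_LENGTH_FOR_QUICKSORT elements leaves small runs for the single
// InsertionSort() pass in IntroSort(), while the depth counter checked on
// entry falls back to HeapSort() after MAX_DEPTH_BEFORE_HEAPSORT levels,
// bounding the worst case at O(n log n); this combination is the defining
// idea of introspective sorting.)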
if (leftSize >= MIN_LENGTH_FOR_QUICKSORT) Partition(startingLeft, leftSize, depth); // Partition right (greater than pivot) if there are enough values to warrant it // otherwise do insertion sort on the values. if (rightSize >= MIN_LENGTH_FOR_QUICKSORT) Partition(right, rightSize, depth); } template inline void InsertionSort(T * array, unsigned int count) { // A basic insertion sort. if (count < 3) { if ((count == 2) && (array[0] > array[1])) Swap(array, array + 1); return; } T * ptr2, * ptr3 = array + 1, * ptr4 = array + count; if (array[0] > array[1]) Swap(array, array + 1); while (true) { while ((++ptr3 < ptr4) && (ptr3[0] >= ptr3[-1])); if (ptr3 >= ptr4) break; if (ptr3[-2] <= ptr3[0]) { if (ptr3[-1] > ptr3[0]) Swap(ptr3, ptr3 - 1); } else { ptr2 = ptr3 - 1; T v = *ptr3; while ((ptr2 >= array) && (ptr2[0] > v)) { ptr2[1] = ptr2[0]; ptr2--; } ptr2[1] = v; } } } template inline void HeapSort(T * array, int length) { // A basic heapsort. for (int k = length >> 1; k > 0; k--) HeapSort(array, k, length); do { Swap(array, array + (--length)); HeapSort(array, 1, length); } while (length > 1); } template inline void HeapSort(T * array, int k, int N) { // A basic heapsort. T temp = array[k - 1]; int n = N >> 1; int j = (k << 1); while (k <= n) { if ((j < N) && (array[j - 1] < array[j])) j++; if (temp >= array[j - 1]) break; else { array[k - 1] = array[j - 1]; k = j; j <<= 1; } } array[k - 1] = temp; } // Restore the default warning which appears in MSVC for // warning #4244 which was disabled at top of this file. #ifdef WIN32 #pragma warning (default : 4244) #endif #endif kernlab/src/ctable.cpp0000644000175100001440000000661712234152620014442 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ChildTable.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef CTABLE_CPP #define CTABLE_CPP #include "ctable.h" #include /** * Return the value of idx-th "up" field of child table. * val = childtab[idx -1]; * * \param idx - (IN) The index of child table. * \param val - (OUT) The value of idx-th entry in child table's "up" field. */ ErrorCode ChildTable::up(const UInt32 &idx, UInt32 &val){ if(idx == size()) { // Special case: To get the first 0-index val = (*this)[idx-1]; return NOERROR; } // svnvish: BUGBUG // Do we need to this in production code? UInt32 lcp_idx = 0, lcp_prev_idx = 0; lcp_idx = _lcptab[idx]; lcp_prev_idx = _lcptab[idx-1]; assert(lcp_prev_idx > lcp_idx); val = (*this)[idx-1]; return NOERROR; } /** * Return the value of idx-th "down" field of child table. Deprecated. 
* Instead use val = childtab[idx]; * * \param idx - (IN) The index of child table. * \param val - (OUT) The value of idx-th entry in child table's "down" field. */ ErrorCode ChildTable::down(const UInt32 &idx, UInt32 &val){ // For a l-interval, l-[i..j], childtab[i].down == childtab[j+1].up // If l-[i..j] is last child-interval of its parent OR 0-[0..n], // childtab[i].nextlIndex == childtab[i].down // svnvish: BUGBUG // Do we need to this in production code? // UInt32 lcp_idx = 0, lcp_nextidx = 0; // lcp_nextidx = _lcptab[(*this)[idx]]; // lcp_idx = _lcptab[idx]; // assert(lcp_nextidx > lcp_idx); // childtab[i].down := childtab[i].nextlIndex val = (*this)[idx]; return NOERROR; } /** * Return the first l-index of a given l-[i..j] interval. * * \param i - (IN) Left bound of l-[i..j] * \param j - (IN) Right bound of l-[i..j] * \param idx - (OUT) The first l-index. */ ErrorCode ChildTable::l_idx(const UInt32 &i, const UInt32 &j, UInt32 &idx){ UInt32 up = (*this)[j]; if(i < up && up <= j){ idx = up; }else { idx = (*this)[i]; } return NOERROR; } /** * Dump array elements to output stream * * \param os - (IN) Output stream. * \param ct - (IN) ChildTable object. */ std::ostream& operator << (std::ostream& os, const ChildTable& ct){ for( UInt32 i = 0; i < ct.size(); i++ ){ os << "ct[ " << i << "]: " << ct[i] << std::endl; } return os; } #endif kernlab/src/misc.c0000644000175100001440000000055311304023134013566 0ustar hornikusers#include #include void *xmalloc(size_t size) { void *ptr = (void *) malloc(size); return ptr; } double mymax(double a, double b) { if (a > b) return a; return b; } double mymin(double a, double b) { if (a < b) return a; return b; } double sign(double a, double b) { if (b >= 0) return fabs(a); return -fabs(a); } kernlab/src/kspectrumweight.h0000644000175100001440000000326212234152620016073 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/KSpectrumWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef KSPECTRUMWEIGHT_H #define KSPECTRUMWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' K-spectrum weight class class KSpectrumWeight : public I_WeightFactory { Real k; public: /// Constructor KSpectrumWeight(const Real & k_=5.0):k(k_) {} /// Destructor virtual ~KSpectrumWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/wmsufsort.cpp0000644000175100001440000000442512234152620015254 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_msufsort.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 //' Wrapper for Michael Maniscalco's MSufSort version 2.2 algorithm #ifndef W_MSUFSORT_CPP #define W_MSUFSORT_CPP #include #include #include #include "wmsufsort.h" W_msufsort::W_msufsort() { msuffixsorter = new MSufSort(); } W_msufsort::~W_msufsort() { delete msuffixsorter; } /** * Construct Suffix Array using Michael Maniscalco's algorithm * * \param _text - (IN) The text which resultant SA corresponds to. * \param _len - (IN) The length of the text. * \param _sa - (OUT) Suffix array instance. */ ErrorCode W_msufsort::ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array){ //' A temporary copy of text SYMBOL *text_copy = new SYMBOL[len]; //' chteo: BUGBUG //' redundant? assert(text_copy != NULL); memcpy(text_copy, text, sizeof(SYMBOL) * len); msuffixsorter->Sort(text_copy, len); //' Code adapted from MSufSort::verifySort() for (UInt32 i = 0; i < len; i++) { UInt32 tmp = msuffixsorter->ISA(i)-1; array[tmp] = i; } //' Deallocate the memory allocated for #text_copy# delete [] text_copy; return NOERROR; } #endif kernlab/src/dgpstep.c0000644000175100001440000000275111304023134014303 0ustar hornikusersvoid dgpstep(int n, double *x, double *xl, double *xu, double alpha, double *w, double *s) { /* c ********** c c Subroutine dgpstep c c This subroutine computes the gradient projection step c c s = P[x + alpha*w] - x, c c where P is the projection on the n-dimensional interval [xl,xu]. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. 
c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c alpha is a double precision variable. c On entry alpha specifies the scalar alpha. c On exit alpha is unchanged. c c w is a double precision array of dimension n. c On entry w specifies the vector w. c On exit w is unchanged. c c s is a double precision array of dimension n. c On entry s need not be specified. c On exit s contains the gradient projection step. c c ********** */ int i; for (i=0;i xu[i]) s[i] = xu[i] - x[i]; else s[i] = alpha*w[i]; } kernlab/src/stringkernel.h0000644000175100001440000000542612761213650015367 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/StringKernel.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 10 Aug 2006 #ifndef STRINGKERNEL_H #define STRINGKERNEL_H #include "datatype.h" #include "errorcode.h" #include "esa.h" #include "isafactory.h" #include "ilcpfactory.h" #include "iweightfactory.h" //#include "W_msufsort.h" #include "wkasailcp.h" #include "cweight.h" #include "expdecayweight.h" #include "brweight.h" #include "kspectrumweight.h" //' Types of substring weighting functions enum WeightFunction{CONSTANT, EXPDECAY, KSPECTRUM, BOUNDRANGE}; using namespace std; class StringKernel { public: /// Variables ESA *esa; I_WeightFactory *weigher; Real *val; //' val array. Storing precomputed val(t) values. Real *lvs; //' leaves array. Storing weights for leaves. 
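// Typical call sequence (illustrative sketch only; it mirrors the
// stringtv() entry point at the end of stringkernel.cpp, and the weight
// function and parameter shown are example values, not defaults):
#if 0
    StringKernel sk(text_len, text, EXPDECAY, 0.5, 0); // builds the ESA internally
    sk.Set_Lvs();              // leaf weights lvs[i] = i
    sk.PrecomputeVal();        // cache val(t) for every lcp-interval
    Real k = 0.0;
    sk.Compute_K(x, x_len, k); // k now holds k(text, x)
#endif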
/// Constructors StringKernel(); //' Given contructed suffix array StringKernel(ESA *esa_, int weightfn, Real param, int verb=INFO); //' Given text, build suffix array for it StringKernel(const UInt32 &size, SYMBOL *text, int weightfn, Real param, int verb=INFO); /// Destructor virtual ~StringKernel(); //' Methods /// Precompute the contribution of each intervals (or internal nodes) void PrecomputeVal(); /// Compute Kernel matrix void Compute_K(SYMBOL *xprime, const UInt32 &xprime_len, Real &value); /// Set leaves array, lvs[] void Set_Lvs(const Real *leafWeight, const UInt32 *len, const UInt32 &m); /// Set leaves array as lvs[i]=i for i=0 to esa->length void Set_Lvs(); private: int _verb; /// An iterative auxiliary function used in PrecomputeVal() void IterativeCompute(const UInt32 &left, const UInt32 &right); }; #endif kernlab/src/msufsort.cpp0000644000175100001440000002410012774377717015106 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #include "msufsort.h" #include #include #include #include //============================================================================= // MSufSort. //============================================================================= SYMBOL_TYPE MSufSort::m_reverseAltSortOrder[256]; // chteo: Changed the member initialisation order to get rid of compilation warning [181006] // MSufSort::MSufSort():m_ISA(0), m_chainHeadStack(8192, 0x20000, true), m_suffixesSortedByInduction(120000, 1000000, true), // m_chainMatchLengthStack(8192, 0x10000, true), m_chainCountStack(8192, 0x10000, true) MSufSort::MSufSort():m_chainMatchLengthStack(8192, 0x10000, true), m_chainCountStack(8192, 0x10000, true), m_chainHeadStack(8192, 0x20000, true), m_ISA(0), m_suffixesSortedByInduction(120000, 1000000, true) { // constructor. unsigned char array[10] = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}; int n = 0; for (; n < 10; n++) { m_forwardAltSortOrder[array[n]] = n; m_reverseAltSortOrder[n] = array[n]; } for (int i = 0; i < 256; i++) { bool unresolved = true; for (int j = 0; j < 10; j++) if (array[j] == i) unresolved = false; if (unresolved) { m_forwardAltSortOrder[i] = n; m_reverseAltSortOrder[n++] = i; } } } MSufSort::~MSufSort() { // destructor. // delete the inverse suffix array if allocated. 
if (m_ISA) delete [] m_ISA; m_ISA = 0; } void MSufSort::ReverseAltSortOrder(SYMBOL_TYPE * data, unsigned int nBytes) { #ifndef SORT_16_BIT_SYMBOLS for (unsigned int i = 0; i < nBytes; i++) data[i] = m_reverseAltSortOrder[data[i]]; #endif } unsigned int MSufSort::GetElapsedSortTime() { return m_sortTime; } unsigned int MSufSort::GetMemoryUsage() { /* unsigned int ret = 5 * m_sourceLength; ret += (m_chainStack.m_stackSize * 4); ret += (m_suffixesSortedByInduction.m_stackSize * 8); ret += sizeof(*this); */ return 0; } unsigned int MSufSort::Sort(SYMBOL_TYPE * source, unsigned int sourceLength) { ///tch: //printf("\nIn MSufSort::Sort()\n"); // set the member variables to the source string and its length. m_source = source; m_sourceLength = sourceLength; m_sourceLengthMinusOne = sourceLength - 1; Initialize(); unsigned int start = clock(); InitialSort(); while (m_chainHeadStack.Count()) ProcessNextChain(); while (m_currentSuffixChainId <= 0xffff) ProcessSuffixesSortedByEnhancedInduction(m_currentSuffixChainId++); unsigned int finish = clock(); m_sortTime = finish - start; ///tch: //printf("\nFinished MSufSort::Sort()\nPress any key to continue...\n"); //printf("%s\n",m_source); //system("pause"); //getchar(); // printf(" %c", 13); return ISA(0); } void MSufSort::Initialize() { // Initializes this object just before sorting begins. if (m_ISA) delete [] m_ISA; m_ISA = new unsigned int[m_sourceLength + 1]; memset(m_ISA, 0, sizeof(unsigned int) * (m_sourceLength + 1)); m_nextSortedSuffixValue = 0; m_numSortedSuffixes = 0; m_suffixMatchLength = 0; m_currentSuffixChainId = 0; m_tandemRepeatDepth = 0; m_firstSortedTandemRepeat = END_OF_CHAIN; m_hasTandemRepeatSortedByInduction = false; m_hasEvenLengthTandemRepeats = false; m_firstUnsortedTandemRepeat = END_OF_CHAIN; for (unsigned int i = 0; i < 0x10000; i++) m_startOfSuffixChain[i] = m_endOfSuffixChain[i] = m_firstSuffixByEnhancedInductionSort[i] = END_OF_CHAIN; for (unsigned int i = 0; i < 0x10000; i++) m_firstSortedPosition[i] = 0; m_numNewChains = 0; #ifdef SHOW_PROGRESS m_progressUpdateIncrement = (unsigned int)(m_sourceLength / 100); m_nextProgressUpdate = 1; #endif } void MSufSort::InitialSort() { // This is the first sorting pass which makes the initial suffix // chains from the given source string. Pushes these chains onto // the stack for further sorting. 
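// (Overview of the pass below: every suffix is bucketed by its first two
// symbols via Value16(), giving up to 0x10000 initial chains, and counted
// in m_firstSortedPosition[]. With USE_ENHANCED_INDUCTION_SORTING,
// suffixes whose order can later be induced are flagged
// SORTED_BY_ENHANCED_INDUCTION instead of being chained; the prefix-sum
// loop at the end then turns the per-bucket counts into starting
// positions.)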
#ifndef SORT_16_BIT_SYMBOLS #ifdef USE_ALT_SORT_ORDER for (unsigned int suffixIndex = 0; suffixIndex < m_sourceLength; suffixIndex++) m_source[suffixIndex] = m_forwardAltSortOrder[m_source[suffixIndex]]; #endif #endif #ifdef USE_ENHANCED_INDUCTION_SORTING m_ISA[m_sourceLength - 1] = m_ISA[m_sourceLength - 2] = SORTED_BY_ENHANCED_INDUCTION; m_firstSortedPosition[Value16(m_sourceLength - 1)]++; m_firstSortedPosition[Value16(m_sourceLength - 2)]++; for (int suffixIndex = m_sourceLength - 3; suffixIndex >= 0; suffixIndex--) { unsigned short symbol = Value16(suffixIndex); m_firstSortedPosition[symbol]++; #ifdef SORT_16_BIT_SYMBOLS unsigned short valA = ENDIAN_SWAP_16(m_source[suffixIndex]); unsigned short valB = ENDIAN_SWAP_16(m_source[suffixIndex + 1]); if ((suffixIndex == m_sourceLengthMinusOne) || (valA > valB)) m_ISA[suffixIndex] = SORTED_BY_ENHANCED_INDUCTION; else AddToSuffixChain(suffixIndex, symbol); #else bool useEIS = false; if ((m_source[suffixIndex] > m_source[suffixIndex + 1]) || ((m_source[suffixIndex] < m_source[suffixIndex + 1]) && (m_source[suffixIndex] > m_source[suffixIndex + 2]))) useEIS = true; if (!useEIS) { if (m_endOfSuffixChain[symbol] == END_OF_CHAIN) { m_endOfSuffixChain[symbol] = m_startOfSuffixChain[symbol] = suffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(symbol); } else { m_ISA[suffixIndex] = m_startOfSuffixChain[symbol]; m_startOfSuffixChain[symbol] = suffixIndex; } } else m_ISA[suffixIndex] = SORTED_BY_ENHANCED_INDUCTION; #endif } #else for (unsigned int suffixIndex = 0; suffixIndex < m_sourceLength; suffixIndex++) { unsigned short symbol = Value16(suffixIndex); AddToSuffixChain(suffixIndex, symbol); } #endif #ifdef USE_ENHANCED_INDUCTION_SORTING unsigned int n = 1; for (unsigned int i = 0; i < 0x10000; i++) { unsigned short p = ENDIAN_SWAP_16(i); unsigned int temp = m_firstSortedPosition[p]; if (temp) { m_firstSortedPosition[p] = n; n += temp; } } #endif MarkSuffixAsSorted(m_sourceLength, m_nextSortedSuffixValue); PushNewChainsOntoStack(true); } void MSufSort::ResolveTandemRepeatsNotSortedWithInduction() { unsigned int tandemRepeatLength = m_suffixMatchLength - 1; unsigned int startOfFinalList = END_OF_CHAIN; while (m_firstSortedTandemRepeat != END_OF_CHAIN) { unsigned int stopLoopAtIndex = startOfFinalList; m_ISA[m_lastSortedTandemRepeat] = startOfFinalList; startOfFinalList = m_firstSortedTandemRepeat; unsigned int suffixIndex = m_firstSortedTandemRepeat; m_firstSortedTandemRepeat = END_OF_CHAIN; while (suffixIndex != stopLoopAtIndex) { if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = (suffixIndex - tandemRepeatLength); else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = (suffixIndex - tandemRepeatLength)); } suffixIndex = m_ISA[suffixIndex]; } } m_tandemRepeatDepth--; if (!m_tandemRepeatDepth) { while (startOfFinalList != END_OF_CHAIN) { unsigned int next = m_ISA[startOfFinalList]; MarkSuffixAsSorted(startOfFinalList, m_nextSortedSuffixValue); startOfFinalList = next; } } else { m_firstSortedTandemRepeat = startOfFinalList; } } unsigned int MSufSort::ISA(unsigned int index) { return (m_ISA[index] & 0x3fffffff); } int MSufSort::CompareStrings(SYMBOL_TYPE * stringA, SYMBOL_TYPE * stringB, int len) { #ifdef SORT_16_BIT_SYMBOLS while (len) { unsigned short valA = ENDIAN_SWAP_16(stringA[0]); unsigned short valB = ENDIAN_SWAP_16(stringB[0]); if (valA > valB) return 1; if (valA < 
valB) return -1; stringA++; stringB++; len--; } #else while (len) { if (stringA[0] > stringB[0]) return 1; if (stringA[0] < stringB[0]) return -1; stringA++; stringB++; len--; } #endif return 0; } bool MSufSort::VerifySort() { //printf("\n\nVerifying sort\n\n"); bool error = false; int progressMax = m_sourceLength; int progressValue = 0; int progressUpdateStep = progressMax / 100; int nextProgressUpdate = 1; unsigned int * suffixArray = new unsigned int[m_sourceLength]; for (unsigned int i = 0; ((!error) && (i < m_sourceLength)); i++) { if (!(m_ISA[i] & 0x80000000)) error = true; unsigned int n = (m_ISA[i] & 0x3fffffff) - 1; suffixArray[n] = i; } // all ok so far. // now compare the suffixes in lexicographically sorted order to confirm the sort was good. for (unsigned int suffixIndex = 0; ((!error) && (suffixIndex < (m_sourceLength - 1))); suffixIndex++) { if (++progressValue == nextProgressUpdate) { nextProgressUpdate += progressUpdateStep; //printf("Verify sort: %.2f%% complete%c", ((double)progressValue / progressMax) * 100, 13); } SYMBOL_TYPE * ptrA = &m_source[suffixArray[suffixIndex]]; SYMBOL_TYPE * ptrB = &m_source[suffixArray[suffixIndex + 1]]; int maxLen = (ptrA < ptrB) ? m_sourceLength - (ptrB - m_source) : m_sourceLength - (ptrA - m_source); int c = CompareStrings(ptrA, ptrB, maxLen); if (c > 0) error = true; else if ((c == 0) && (ptrB > ptrA)) error = true; } //printf(" %c", 13); delete [] suffixArray; return !error; } kernlab/src/stringk.c0000644000175100001440000001100611304023134014312 0ustar hornikusers/* headers needed for malloc/free, pow and the R API */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <R.h> #include <Rinternals.h> double ***cache ; double kaux (const char *u, int p, const char *v, int q, int n, double lambda) { register int j; double tmp; /* case 1: if a full substring length is processed, return*/ if (n == 0) return (1.0); /* check, if the value was already computed in a previous computation */ if (cache [n] [p] [q] != -1.0) return (cache [n] [p] [q]); /* case 2: at least one substring is too short */ if (p < n || q < n) return (0.0); /* case 3: recursion */ for (j= 0, tmp = 0; j < q; j++) { if (v [j] == u [p - 1]) tmp += kaux (u, p - 1, v, j, n - 1, lambda) * pow (lambda, (float) (q - j + 1)); } cache [n] [p] [q] = lambda * kaux (u, p - 1, v, q, n, lambda) + tmp; return (cache [n] [p] [q]); } double seqk (const char *u, int p, const char *v, int q, int n, double lambda) { register int j; double kp; /* the simple case: (at least) one string is too short */ if (p < n || q < n) return (0.0); /* the recursion: use kaux for the t'th substrings*/ for (j = 0, kp = 0.0; j < q; j++) { if (v [j] == u [p - 1]) kp += kaux (u, p - 1, v, j, n - 1, lambda) * lambda * lambda; } return (seqk (u, p - 1, v, q, n, lambda) + kp); } /* recursively computes the subsequence kernel between s1 and s2 where subsequences of exactly length n are considered */ SEXP subsequencek(SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); int i, j, k; SEXP ret; /* allocate memory for auxiliary cache variable */ cache = (double ***) malloc (n * sizeof (double **)); for (i = 1; i < n; i++) { cache [i] = (double **) malloc (p * sizeof (double *)); for (j = 0; j < p; j++) { cache [i] [j] = (double *) malloc (q * sizeof (double)); for (k = 0; k < q; k++) cache [i] [j] [k] = -1.0; } } PROTECT(ret = allocVector(REALSXP, 1)); /* invoke recursion */
REAL(ret)[0] = seqk (u, p, v, q, n, lambda); /* free memory */ for (i = 1; i < n; i++) { for (j = 0; j < p; j++) free (cache [i] [j]); free (cache [i]); } free (cache); UNPROTECT(1); return (ret); } /* computes the substring kernel between s1 and s2 where substrings up to length n are considered */ SEXP fullsubstringk (SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); register int i, j, k; double ret, tmp; SEXP retk; /* computes the substring kernel */ for (ret = 0.0, i = 0; i < p; i++) { for (j = 0; j < q; j++) if (u [i] == v [j]) { for (k = 0, tmp = lambda * lambda; /* starting condition */ (i + k < p) && (j + k < q) && (u [i + k] == v [j + k]) && (k < n); /* stop conditions */ k++, tmp *= (lambda * lambda)) /* update per iteration */ ret += tmp; } } PROTECT(retk = allocVector(REALSXP, 1)); REAL(retk)[0] = ret; UNPROTECT(1); return (retk); } /* computes the substring kernel between s1 and s2 where substrings of exactly length n are considered */ SEXP substringk (SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); SEXP retk; register int i, j, k; double ret, tmp; /* computes the substring kernel */ for (ret = 0.0, i = 0; i < p; i++) { for (j = 0; j < q; j++) { for (k = 0, tmp = lambda * lambda; /* starting condition */ (i + k < p) && (j + k < q) && (u [i + k] == v [j + k]) && (k < n); /* stop conditions */ k++, tmp *= (lambda * lambda)); /* update per iteration */ if (k == n) ret += tmp; /* update features in case of full match */ } } PROTECT(retk = allocVector(REALSXP, 1)); REAL(retk)[0] = ret; UNPROTECT(1); return (retk); } kernlab/src/kspectrumweight.cpp0000644000175100001440000000652312234152620016431 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo <ChoonHui.Teo@rsise.anu.edu.au> * S V N Vishwanathan <SVN.Vishwanathan@nicta.com.au> * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/KSpectrumWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef KSPECTRUMWEIGHT_CPP #define KSPECTRUMWEIGHT_CPP #include "kspectrumweight.h" #include <cassert> /** * K-spectrum weight function. Compute number of common (exactly) k character substrings. * * \param floor_len - (IN) Length of floor interval of matched substring. (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. (cf. tau in VisSmo02).
* \param weight - (OUT) The weight value. * */ ErrorCode KSpectrumWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. weight = 0.0; if(floor_len < k && x_len >= k) weight = 1.0; // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " weight : " << weight << std::endl; return NOERROR; } #endif //' Question: Why return only 0 or 1? //' Answer : In the k-spectrum method, any length of matched substring other than k //' does not play a significant role in the string kernel. So, returning 1 //' means that the substring weight equals the # of suffixes in the current interval. //' When 0 is returned, it means that the substring weight equals the floor //' interval entry in val[]. (See the definition of substring weight in //' StringKernel.cpp) //' Question: Why is the following a correct implementation of k-spectrum ? //' Answer : [Val precomputation phase] Every interval with lcp < k has val := 0. //' For intervals with (lcp==k) or (lcp>k but floor_lcp<k), weight:=1 is returned, //' since there the matched substring has length >= k but the floor interval //' has val := 0 (floor_lcp < k). Hence, returning weight:=1 will make the substring //' weight equal to the size of the immediate ceil interval (# of substrings in common). kernlab/vignettes/0000755000175100001440000000000013562451344013725 5ustar hornikuserskernlab/vignettes/A.cls0000644000175100001440000001273612055335060014611 0ustar hornikusers\def\fileversion{1.0} \def\filename{A} \def\filedate{2004/10/08} %% %% \NeedsTeXFormat{LaTeX2e} \ProvidesClass{A}[\filedate\space\fileversion\space A class ] %% options \LoadClass[10pt,a4paper,twoside]{article} \newif\if@notitle \@notitlefalse \DeclareOption{notitle}{\@notitletrue} \ProcessOptions %% required packages \RequirePackage{graphicx,a4wide,color,hyperref,ae,fancyvrb,thumbpdf} \RequirePackage[T1]{fontenc} \usepackage[authoryear,round,longnamesfirst]{natbib} \bibpunct{(}{)}{;}{a}{}{,} \bibliographystyle{jss} %% paragraphs \setlength{\parskip}{0.7ex plus0.1ex minus0.1ex} \setlength{\parindent}{0em} %% commands \let\code=\texttt \let\proglang=\textsf \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \newcommand{\E}{\mathsf{E}} \newcommand{\VAR}{\mathsf{VAR}} \newcommand{\COV}{\mathsf{COV}} \newcommand{\Prob}{\mathsf{P}} %% for all publications \newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}} \newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}} \newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}} \newcommand{\Keywords}[1]{\def\@Keywords{#1}} \newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}} \newcommand{\Abstract}[1]{\def\@Abstract{#1}} %% defaults \author{Firstname Lastname\\Affiliation} \title{Title} \Abstract{---!!!---an abstract is required---!!!---} \Plainauthor{\@author} \Plaintitle{\@title} \Shorttitle{\@title} \Keywords{---!!!---at least one keyword is required---!!!---} \Plainkeywords{\@Keywords} %% Sweave(-like) %\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl} %\DefineVerbatimEnvironment{Soutput}{Verbatim}{} %\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl} %\newenvironment{Schunk}{}{} \DefineVerbatimEnvironment{Code}{Verbatim}{} \DefineVerbatimEnvironment{CodeInput}{Verbatim}{fontshape=sl} \DefineVerbatimEnvironment{CodeOutput}{Verbatim}{} \newenvironment{CodeChunk}{}{} \setkeys{Gin}{width=0.8\textwidth} %% new \maketitle \def\maketitle{ \begingroup \def\thefootnote{\fnsymbol{footnote}} \def\@makefnmark{\hbox
to 0pt{$^{\@thefnmark}$\hss}} \long\def\@makefntext##1{\parindent 1em\noindent \hbox to1.8em{\hss $\m@th ^{\@thefnmark}$}##1} \@maketitle \@thanks \endgroup \setcounter{footnote}{0} \thispagestyle{empty} \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}} \pagestyle{myheadings} \let\maketitle\relax \let\@maketitle\relax \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax } \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize {\centering {\LARGE\bf \@title\par} \def\And{\end{tabular}\hfil\linebreak[0]\hfil \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}% \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author\end{tabular}% \vskip 0.3in minus 0.1in \hrule \begin{abstract} \@Abstract \end{abstract}} \textit{Keywords}:~\@Keywords. \vskip 0.1in minus 0.05in \hrule \vskip 0.2in minus 0.1in }} %% sections, subsections, and subsubsections \newlength{\preXLskip} \newlength{\preLskip} \newlength{\preMskip} \newlength{\preSskip} \newlength{\postMskip} \newlength{\postSskip} \setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex} \setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex} \setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex} \setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex} \setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex} \setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex} \newcommand{\jsssec}[2][default]{\vskip \preXLskip% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \centerline{\textbf{\Large \thesection. #2}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssecnn}[1]{\vskip \preXLskip% \centerline{\textbf{\Large #1}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssubsec}[2][default]{\vskip \preMskip% \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}% \refstepcounter{subsection}% \textbf{\large \thesubsection. 
#2} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsecnn}[1]{\vskip \preMskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip% \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}% \refstepcounter{subsubsection}% {\large \textit{#2}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip% {\textit{\large #1}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesec}[2][default]{\vskip \preLskip% %% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesecnn}[1]{\vskip \preLskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \renewcommand{\section}{\secdef \jsssec \jsssecnn} \renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn} \renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn} %% colors \definecolor{Red}{rgb}{0.7,0,0} \definecolor{Blue}{rgb}{0,0,0.8} \hypersetup{% hyperindex = {true}, colorlinks = {true}, linktocpage = {true}, plainpages = {false}, linkcolor = {Blue}, citecolor = {Blue}, urlcolor = {Red}, pdfstartview = {Fit}, pdfpagemode = {UseOutlines}, pdfview = {XYZ null null null} } \AtBeginDocument{ \hypersetup{% pdfauthor = {\@Plainauthor}, pdftitle = {\@Plaintitle}, pdfkeywords = {\@Plainkeywords} } } \if@notitle %% \AtBeginDocument{\maketitle} \else \AtBeginDocument{\maketitle} \fi kernlab/vignettes/jss.bib0000644000175100001440000003411013271617252015200 0ustar hornikusers@Article{kernlab:Karatzoglou+Smola+Hornik:2004, author = {Alexandros Karatzoglou and Alex Smola and Kurt Hornik and Achim Zeileis}, title = {kernlab -- An \proglang{S4} Package for Kernel Methods in \proglang{R}}, year = {2004}, journal = {Journal of Statistical Software}, volume = {11}, number = {9}, pages = {1--20}, url = {http://www.jstatsoft.org/v11/i09/} } @Book{kernlab:Schoelkopf+Smola:2002, author = {Bernhard Sch\"olkopf and Alex Smola}, title = {Learning with Kernels}, publisher = {MIT Press}, year = 2002, } @Book{kernlab:Chambers:1998, Author = {John M. Chambers}, title = {Programming with Data}, Publisher = {Springer, New York}, Year = 1998, note = {ISBN 0-387-98503-4}, } @Book{kernlab:Hastie:2001, author = {T. Hastie and R. Tibshirani and J. H. Friedman}, title = {The Elements of Statistical Learning}, publisher = {Springer}, Year = 2001, } @Article{kernlab:Vanderbei:1999, author = {Robert Vanderbei}, title = {{LOQO}: An Interior Point Code for Quadratic Programming}, journal = {Optimization Methods and Software}, year = 1999, volume = 12, pages = {251--484}, url = {http://www.sor.princeton.edu/~rvdb/ps/loqo6.pdf}, } @Misc{kernlab:Leisch+Dimitriadou, author = {Friedrich Leisch and Evgenia Dimitriadou}, title = {\pkg{mlbench}---{A} Collection for Artificial and Real-world Machine Learning Benchmarking Problems}, howpublished = {\textsf{R} package, Version 0.5-6}, note = {Available from \url{http://CRAN.R-project.org}}, year = 2001, month = 12, } @Misc{kernlab:Roever:2004, author = {Christian Roever and Nils Raabe and Karsten Luebke and Uwe Ligges}, title = { \pkg{klaR} -- Classification and Visualization}, howpublished = {\textsf{R} package, Version 0.3-3}, note = {Available from \url{http://cran.R-project.org}}, year = 2004, month = 7, } @Article{kernlab:Hsu+Lin:2002, author = {C.-W.
Hsu and Chih-Jen Lin}, title = {A Comparison of Methods for Multi-class Support Vector Machines}, journal = {IEEE Transactions on Neural Networks}, year = 2002, volume = 13, pages = {415--425}, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz}, } @Misc{kernlab:Chang+Lin:2001, author = {Chih-Chung Chang and Chih-Jen Lin}, title = {{LIBSVM}: A Library for Support Vector Machines}, note = {Software available at \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm}}, year = 2001, } @Article{kernlab:Platt:2000, Author = {J. C. Platt}, Title = {Probabilistic Outputs for Support Vector Machines and Comparison to Regularized Likelihood Methods}, Journal = {Advances in Large Margin Classifiers, A. Smola, P. Bartlett, B. Sch\"olkopf and D. Schuurmans, Eds.}, Year = 2000, publisher = {Cambridge, MA: MIT Press}, url = {http://citeseer.nj.nec.com/platt99probabilistic.html}, } @Article{kernlab:Platt:1998, Author = {J. C. Platt}, Title = {Probabilistic Outputs for Support Vector Machines and Comparison to Regularized Likelihood Methods}, Journal = {B. Sch\"olkopf, C. J. C. Burges, A. J. Smola, editors, Advances in Kernel Methods --- Support Vector Learning}, Year = 1998, publisher = {Cambridge, MA: MIT Press}, url = {http://research.microsoft.com/~jplatt/abstracts/smo.html}, } @Article{kernlab:Keerthi:2002, Author = {S. S. Keerthi and E. G. Gilbert}, Title = {Convergence of a Generalized {SMO} Algorithm for {SVM} Classifier Design}, Journal = {Machine Learning}, pages = {351--360}, Year = 2002, volume = 46, url = {http://guppy.mpe.nus.edu.sg/~mpessk/svm/conv_ml.ps.gz}, } @Article{kernlab:Olvi:2000, Author = {Alex J. Smola and Olvi L. Mangasarian and Bernhard Sch\"olkopf}, Title = {Sparse Kernel Feature Analysis}, Journal = {24th Annual Conference of Gesellschaft f\"ur Klassifikation}, publisher = {University of Passau}, Year = 2000, url = {ftp://ftp.cs.wisc.edu/pub/dmi/tech-reports/99-04.ps}, } @Unpublished{kernlab:Lin:2001, Author = {H.-T. Lin and Chih-Jen Lin and R. C. Weng}, Title = {A Note on {Platt's} Probabilistic Outputs for Support Vector Machines}, Year = 2001, note = {Available at \url{http://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.ps}}, } @Unpublished{kernlab:Weng:2004, Author = {C.-J. Lin and R. C. Weng}, Title = {Probabilistic Predictions for Support Vector Regression}, Year = 2004, note = {Available at \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svrprob.pdf}}, } @Article{kernlab:Crammer:2000, Author = {K. Crammer and Y.
Singer}, Title = {On the Learnability and Design of Output Codes for Multiclass Problems}, Year = 2000, Journal = {Computational Learning Theory}, Pages = {35--46}, url = {http://www.cs.huji.ac.il/~kobics/publications/mlj01.ps.gz}, } @Article{kernlab:joachim:1999, Author = {Thorsten Joachims}, Title = {Making Large-scale {SVM} Learning Practical}, Journal = {In Advances in Kernel Methods --- Support Vector Learning}, Chapter = 11, Year = 1999, publisher = {MIT Press}, url = {http://www-ai.cs.uni-dortmund.de/DOKUMENTE/joachims_99a.ps.gz}, } @Article{kernlab:Meyer:2001, author = {David Meyer}, title = {Support Vector Machines}, journal = {R News}, year = 2001, volume = 1, number = 3, pages = {23--26}, month = {September}, url = {http://CRAN.R-project.org/doc/Rnews/}, note = {\url{http://CRAN.R-project.org/doc/Rnews/}} } @ARTICLE{kernlab:Meyer+Leisch+Hornik:2003, AUTHOR = {David Meyer and Friedrich Leisch and Kurt Hornik}, TITLE = {The Support Vector Machine under Test}, JOURNAL = {Neurocomputing}, YEAR = 2003, MONTH = {September}, PAGES = {169--186}, VOLUME = 55, } @Book{kernlab:Vapnik:1998, author = {Vladimir Vapnik}, Title = {Statistical Learning Theory}, Year = 1998, publisher = {Wiley, New York}, } @Book{kernlab:Vapnik2:1995, author = {Vladimir Vapnik}, Title = {The Nature of Statistical Learning Theory}, Year = 1995, publisher = {Springer, NY}, } @Article{kernlab:Wu:2003, Author = {Ting-Fan Wu and Chih-Jen Lin and Ruby C. Weng}, Title = {Probability Estimates for Multi-class Classification by Pairwise Coupling}, Year = 2003, Journal = {Advances in Neural Information Processing}, Publisher = {MIT Press Cambridge Mass.}, Volume = 16, url = {http://books.nips.cc/papers/files/nips16/NIPS2003_0538.pdf}, } @Article{kernlab:Williams:1995, Author = {Christopher K. I. Williams and Carl Edward Rasmussen}, Title = {Gaussian Processes for Regression}, Year = 1995, Journal = {Advances in Neural Information Processing}, Publisher = {MIT Press Cambridge Mass.}, Volume = 8, url = {http://books.nips.cc/papers/files/nips08/0514.pdf}, } @Article{kernlab:Schoelkopf:1998, Author = {B. Sch\"olkopf and A. Smola and K. R. M\"uller}, Title = {Nonlinear Component Analysis as a Kernel Eigenvalue Problem}, Journal = {Neural Computation}, Volume = 10, Pages = {1299--1319}, Year = 1998, url = {http://mlg.anu.edu.au/~smola/papers/SchSmoMul98.pdf}, } @Article{kernlab:Tipping:2001, Author = {M. E. Tipping}, Title = {Sparse Bayesian Learning and the Relevance Vector Machine}, Journal = {Journal of Machine Learning Research}, Volume = 1, Year = 2001, Pages = {211--244}, url = {http://www.jmlr.org/papers/volume1/tipping01a/tipping01a.pdf}, } @Article{kernlab:Zhou:2003, Author = {D. Zhou and J. Weston and A. Gretton and O. Bousquet and B. Sch\"olkopf}, Title = {Ranking on Data Manifolds}, Journal = {Advances in Neural Information Processing Systems}, Volume = 16, Year = 2003, Publisher = {MIT Press Cambridge Mass.}, url = {http://www.kyb.mpg.de/publications/pdfs/pdf2334.pdf}, } @Article{kernlab:Andrew:2001, Author = {Andrew Y. Ng and Michael I. Jordan and Yair Weiss}, Title = {On Spectral Clustering: Analysis and an Algorithm}, Journal = {Advances in Neural Information Processing Systems}, Volume = 14, Publisher = {MIT Press Cambridge Mass.}, url = {http://www.nips.cc/NIPS2001/papers/psgz/AA35.ps.gz}, } @Article{kernlab:Caputo:2002, Author = {B. Caputo and K. Sim and F. Furesjo and A.
Smola}, Title = {Appearance-based Object Recognition using {SVMs}: Which Kernel Should {I} Use?}, Journal = {Proc of NIPS workshop on Statistical methods for computational experiments in visual processing and computer vision, Whistler, 2002}, Year = 2002, } @Article{kernlab:Putten:2000, Author = {Peter van der Putten and Michel de Ruiter and Maarten van Someren}, Title = {CoIL Challenge 2000 Tasks and Results: Predicting and Explaining Caravan Policy Ownership}, Journal = {Coil Challenge 2000}, Year = 2000, url = {http://www.liacs.nl/~putten/library/cc2000/}, } @Article{kernlab:Hsu:2002, Author = {C.-W. Hsu and Chih-Jen Lin}, Title = {A Simple Decomposition Method for Support Vector Machines}, Journal = {Machine Learning}, Year = 2002, Pages = {291--314}, volume = 46, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/decomp.ps.gz}, } @Article{kernlab:Knerr:1990, Author = {S. Knerr and L. Personnaz and G. Dreyfus}, Title = {Single-layer Learning Revisited: A Stepwise Procedure for Building and Training a Neural Network.}, Journal = {J. Fogelman, editor, Neurocomputing: Algorithms, Architectures and Applications}, Publisher = {Springer-Verlag}, Year = 1990, } @Article{kernlab:Kressel:1999, Author = {U. Kre{\ss}el}, Title = {Pairwise Classification and Support Vector Machines}, Year = 1999, Journal = {B. Sch\"olkopf, C. J. C. Burges, A. J. Smola, editors, Advances in Kernel Methods --- Support Vector Learning}, Pages = {255--268}, Publisher = {Cambridge, MA, MIT Press}, } @Article{kernlab:Hsu2:2002, Title = {A Comparison of Methods for Multi-class Support Vector Machines}, Author = {C.-W. Hsu and Chih-Jen Lin}, Journal = {IEEE Transactions on Neural Networks}, Volume = 13, Year = 2002, Pages = {1045--1052}, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz}, } @Article{kernlab:Tax:1999, Title = {Support Vector Domain Description}, Author = {David M. J. Tax and Robert P. W. Duin}, Journal = {Pattern Recognition Letters}, Volume = 20, Pages = {1191--1199}, Year = 1999, Publisher = {Elsevier}, url = {http://www.ph.tn.tudelft.nl/People/bob/papers/prl_99_svdd.pdf}, } @Article{kernlab:Williamson:1999, Title = {Estimating the Support of a High-Dimensional Distribution}, Author = {B. Sch\"olkopf and J. Platt and J. Shawe-Taylor and A. J. Smola and R. C. Williamson}, Journal = {Microsoft Research, Redmond, WA}, Volume = {TR 87}, Year = 1999, url = {http://research.microsoft.com/research/pubs/view.aspx?msr_tr_id=MSR-TR-99-87}, } @Article{kernlab:Smola1:2000, Title = {New Support Vector Algorithms}, Author = {B. Sch\"olkopf and A. J. Smola and R. C. Williamson and P. L. Bartlett}, Journal = {Neural Computation}, Volume = 12, Year = 2000, Pages = {1207--1245}, url = {http://caliban.ingentaselect.com/vl=3338649/cl=47/nw=1/rpsv/cgi-bin/cgi?body=linker&reqidx=0899-7667(2000)12:5L.1207}, } @Article{kernlab:Wright:1999, Title = {Modified {Cholesky} Factorizations in Interior-point Algorithms for Linear Programming}, Author = {S. Wright}, Journal = {Journal in Optimization}, Volume = 9, publisher = {SIAM}, Year = 1999, Pages = {1159--1191}, url = {http://www-unix.mcs.anl.gov/~wright/papers/P600.pdf}, } @Article{kernlab:more:1999, Title = {Newton's Method for Large-scale Bound Constrained Problems}, Author = {Chih-Jen Lin and J. J. More}, Journal = {SIAM Journal on Optimization}, volume = 9, pages = {1100--1127}, Year = 1999, } @Article{kernlab:Ng:2001, Title = {On Spectral Clustering: Analysis and an Algorithm}, Author = {Andrew Y. Ng and Michael I.
Jordan and Yair Weiss}, Journal = {Neural Information Processing Symposium 2001}, Year = 2001, url = {http://www.nips.cc/NIPS2001/papers/psgz/AA35.ps.gz} } @Article{kernlab:kuss:2003, Title = {The Geometry of Kernel Canonical Correlation Analysis}, Author = {Malte Kuss and Thore Graepel}, Journal = {MPI-Technical Reports}, url = {http://www.kyb.mpg.de/publication.html?publ=2233}, Year = 2003, } %% Mathias Seeger gp pub. @Article{kernlab:Kivinen:2004, Title = {Online Learning with Kernels}, Author = {Jyrki Kivinen and Alexander Smola and Robert Williamson}, Journal = {IEEE Transactions on Signal Processing}, volume = 52, Year = 2004, url = {http://mlg.anu.edu.au/~smola/papers/KivSmoWil03.pdf}, } kernlab/vignettes/kernlab.Rnw0000644000175100001440000014230512055335060016030 0ustar hornikusers\documentclass{A} \usepackage{amsfonts,thumbpdf,alltt} \newenvironment{smallverbatim}{\small\verbatim}{\endverbatim} \newenvironment{smallexample}{\begin{alltt}\small}{\end{alltt}} \SweaveOpts{engine=R,eps=FALSE} %\VignetteIndexEntry{kernlab - An S4 Package for Kernel Methods in R} %\VignetteDepends{kernlab} %\VignetteKeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R} %\VignettePackage{kernlab} <<>>= library(kernlab) options(width = 70) @ \title{\pkg{kernlab} -- An \proglang{S4} Package for Kernel Methods in \proglang{R}} \Plaintitle{kernlab - An S4 Package for Kernel Methods in R} \author{Alexandros Karatzoglou\\Technische Universit\"at Wien \And Alex Smola\\Australian National University, NICTA \And Kurt Hornik\\Wirtschaftsuniversit\"at Wien } \Plainauthor{Alexandros Karatzoglou, Alex Smola, Kurt Hornik} \Abstract{ \pkg{kernlab} is an extensible package for kernel-based machine learning methods in \proglang{R}. It takes advantage of \proglang{R}'s new \proglang{S4} object model and provides a framework for creating and using kernel-based algorithms. The package contains dot product primitives (kernels), implementations of support vector machines and the relevance vector machine, Gaussian processes, a ranking algorithm, kernel PCA, kernel CCA, kernel feature analysis, online kernel methods and a spectral clustering algorithm. Moreover it provides a general purpose quadratic programming solver, and an incomplete Cholesky decomposition method. } \Keywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, \proglang{S4}, \proglang{R}} \Plainkeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R} \begin{document} \section{Introduction} Machine learning is all about extracting structure from data, but it is often difficult to solve problems like classification, regression and clustering in the space in which the underlying observations have been made. Kernel-based learning methods use an implicit mapping of the input data into a high dimensional feature space defined by a kernel function, i.e., a function returning the inner product $ \langle \Phi(x),\Phi(y) \rangle$ between the images of two data points $x, y$ in the feature space. The learning then takes place in the feature space, provided the learning algorithm can be entirely rewritten so that the data points only appear inside dot products with other points. This is often referred to as the ``kernel trick'' \citep{kernlab:Schoelkopf+Smola:2002}.
More precisely, if a projection $\Phi: X \rightarrow H$ is used, the dot product $\langle\Phi(x),\Phi(y)\rangle$ can be represented by a kernel function~$k$ \begin{equation} \label{eq:kernel} k(x,y)= \langle \Phi(x),\Phi(y) \rangle, \end{equation} which is computationally simpler than explicitly projecting $x$ and $y$ into the feature space~$H$. One interesting property of kernel-based systems is that, once a valid kernel function has been selected, one can practically work in spaces of any dimension without paying any computational cost, since feature mapping is never effectively performed. In fact, one does not even need to know which features are being used. Another advantage is that one can design and use a kernel for a particular problem that could be applied directly to the data without the need for a feature extraction process. This is particularly important in problems where a lot of structure of the data is lost by the feature extraction process (e.g., text processing). The inherent modularity of kernel-based learning methods allows one to use any valid kernel on a kernel-based algorithm. \subsection{Software review} The most prominent kernel based learning algorithm is without doubt the support vector machine (SVM), so the existence of many support vector machine packages comes as little surprise. Most of the existing SVM software is written in \proglang{C} or \proglang{C++}, e.g.\ the award winning \pkg{libsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/libsvm/}} \citep{kernlab:Chang+Lin:2001}, \pkg{SVMlight}\footnote{\url{http://svmlight.joachims.org}} \citep{kernlab:joachim:1999}, \pkg{SVMTorch}\footnote{\url{http://www.torch.ch}}, Royal Holloway Support Vector Machines\footnote{\url{http://svm.dcs.rhbnc.ac.uk}}, \pkg{mySVM}\footnote{\url{http://www-ai.cs.uni-dortmund.de/SOFTWARE/MYSVM/index.eng.html}}, and \pkg{M-SVM}\footnote{\url{http://www.loria.fr/~guermeur/}} with many packages providing interfaces to \proglang{MATLAB} (such as \pkg{libsvm}), and even some native \proglang{MATLAB} toolboxes\footnote{ \url{http://www.isis.ecs.soton.ac.uk/resources/svminfo/}}\,\footnote{ \url{http://asi.insa-rouen.fr/~arakotom/toolbox/index}}\,\footnote{ \url{http://www.cis.tugraz.at/igi/aschwaig/software.html}}. Putting SVM specific software aside and considering the abundance of other kernel-based algorithms published nowadays, there is little software available implementing a wider range of kernel methods with some exceptions like the \pkg{Spider}\footnote{\url{http://www.kyb.tuebingen.mpg.de/bs/people/spider/}} software which provides a \proglang{MATLAB} interface to various \proglang{C}/\proglang{C++} SVM libraries and \proglang{MATLAB} implementations of various kernel-based algorithms, \pkg{Torch} \footnote{\url{http://www.torch.ch}} which also includes more traditional machine learning algorithms, and the occasional \proglang{MATLAB} or \proglang{C} program found on a personal web page where an author includes code from a published paper. \subsection[R software]{\proglang{R} software} The \proglang{R} package \pkg{e1071} offers an interface to the award winning \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, a very efficient SVM implementation. \pkg{libsvm} provides a robust and fast SVM implementation and produces state of the art results on most classification and regression problems \citep{kernlab:Meyer+Leisch+Hornik:2003}.
The \proglang{R} interface provided in \pkg{e1071} adds all standard \proglang{R} functionality like object orientation and formula interfaces to \pkg{libsvm}. Another SVM related \proglang{R} package which was made recently available is \pkg{klaR} \citep{kernlab:Roever:2004} which includes an interface to \pkg{SVMlight}, a popular SVM implementation along with other classification tools like Regularized Discriminant Analysis. However, most of the \pkg{libsvm} and \pkg{klaR} SVM code is in \proglang{C++}. Therefore, if one would like to extend or enhance the code with e.g.\ new kernels or different optimizers, one would have to modify the core \proglang{C++} code. \section[kernlab]{\pkg{kernlab}} \pkg{kernlab} aims to provide the \proglang{R} user with basic kernel functionality (e.g., like computing a kernel matrix using a particular kernel), along with some utility functions commonly used in kernel-based methods like a quadratic programming solver, and modern kernel-based algorithms based on the functionality that the package provides. Taking advantage of the inherent modularity of kernel-based methods, \pkg{kernlab} aims to allow the user to switch between kernels on an existing algorithm and even create and use their own kernel functions for the kernel methods provided in the package. \subsection[S4 objects]{\proglang{S4} objects} \pkg{kernlab} uses \proglang{R}'s new object model described in ``Programming with Data'' \citep{kernlab:Chambers:1998} which is known as the \proglang{S4} class system and is implemented in the \pkg{methods} package. In contrast with the older \proglang{S3} model for objects in \proglang{R}, classes, slots, and methods relationships must be declared explicitly when using the \proglang{S4} system. The number and types of slots in an instance of a class have to be established at the time the class is defined. The objects from the class are validated against this definition and have to comply with it at any time. \proglang{S4} also requires formal declarations of methods, unlike the informal system of using function names to identify a certain method in \proglang{S3}. An \proglang{S4} method is declared by a call to \code{setMethod} along with the name and a ``signature'' of the arguments. The signature is used to identify the classes of one or more arguments of the method. Generic functions can be declared using the \code{setGeneric} function. Although such formal declarations require package authors to be more disciplined than when using the informal \proglang{S3} classes, they provide assurance that each object in a class has the required slots and that the names and classes of data in the slots are consistent. An example of a class used in \pkg{kernlab} is shown below. Typically, in a return object we want to include information on the result of the method along with additional information and parameters. Usually \pkg{kernlab}'s classes include slots for the kernel function used and the results and additional useful information.
\begin{smallexample} setClass("specc", representation("vector", # the vector containing the cluster centers="matrix", # the cluster centers size="vector", # size of each cluster kernelf="function", # kernel function used withinss = "vector"), # within cluster sum of squares prototype = structure(.Data = vector(), centers = matrix(), size = matrix(), kernelf = ls, withinss = vector())) \end{smallexample} Accessor and assignment functions are defined and used to access the content of each slot which can be also accessed with the \verb|@| operator. \subsection{Namespace} Namespaces were introduced in \proglang{R} 1.7.0 and provide a means for packages to control the way global variables and methods are being made available. Due to the number of assignment and accessor functions involved, a namespace is used to control the methods which are being made visible outside the package. Since \proglang{S4} methods are being used, the \pkg{kernlab} namespace also imports methods and variables from the \pkg{methods} package. \subsection{Data} The \pkg{kernlab} package also includes data sets which will be used to illustrate the methods included in the package. The \code{spam} data set \citep{kernlab:Hastie:2001}, collected at Hewlett-Packard Labs, contains data on 2788 and 1813 e-mails classified as non-spam and spam, respectively. The 57 variables of each data vector indicate the frequency of certain words and characters in the e-mail. Another data set included in \pkg{kernlab}, the \code{income} data set \citep{kernlab:Hastie:2001}, is taken from a marketing survey in the San Francisco Bay area concerning the income of shopping mall customers. It consists of 14 demographic attributes (nominal and ordinal variables) including the income and 8993 observations. The \code{ticdata} data set \citep{kernlab:Putten:2000} was used in the 2000 Coil Challenge and contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why? The \code{promotergene} data set contains E. Coli promoter gene sequences (DNA) with 106 observations and 58 variables and is available at the UCI Machine Learning repository. Promoters have a region where a protein (RNA polymerase) must make contact and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters. The \code{spirals} data set was created by the \code{mlbench.spirals} function in the \pkg{mlbench} package \citep{kernlab:Leisch+Dimitriadou}. This two-dimensional data set with 300 data points consists of two spirals where Gaussian noise is added to each data point. \subsection{Kernels} A kernel function~$k$ calculates the inner product of two vectors $x$, $x'$ in a given feature mapping $\Phi: X \rightarrow H$. The notion of a kernel is obviously central in the making of any kernel-based algorithm and consequently also in any software package containing kernel-based methods. Kernels in \pkg{kernlab} are \proglang{S4} objects of class \code{kernel} extending the \code{function} class with one additional slot containing a list with the kernel hyper-parameters.
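A kernel object can thus be inspected like any other \proglang{S4} object. As a minimal illustration (using the \code{rbfdot} creator described below), the following shows that a kernel object is a function carrying its hyper-parameters in a slot accessible through the \code{kpar} accessor:

<<>>=
## a kernel object is a function with a hyper-parameter slot
rbfk <- rbfdot(sigma = 0.1)
is(rbfk, "kernel")
kpar(rbfk)
@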
Package \pkg{kernlab} includes 7 different kernel classes which all contain the class \code{kernel} and are used to implement the existing kernels. These classes are used in the function dispatch mechanism of the kernel utility functions described below. Existing kernel functions are initialized by ``creator'' functions. All kernel functions take two feature vectors as parameters and return the scalar dot product of the vectors. An example of the functionality of a kernel in \pkg{kernlab}: <<>>= ## create a RBF kernel function with sigma hyper-parameter 0.05 rbf <- rbfdot(sigma = 0.05) rbf ## create two random feature vectors x <- rnorm(10) y <- rnorm(10) ## compute dot product between x,y rbf(x, y) @ The package includes implementations of the following kernels: \begin{itemize} \item the linear \code{vanilladot} kernel implements the simplest of all kernel functions \begin{equation} k(x,x') = \langle x, x' \rangle \end{equation} which is useful especially when dealing with large sparse data vectors~$x$ as is usually the case in text categorization. \item the Gaussian radial basis function \code{rbfdot} \begin{equation} k(x,x') = \exp(-\sigma \|x - x'\|^2) \end{equation} which is a general purpose kernel and is typically used when no further prior knowledge is available about the data. \item the polynomial kernel \code{polydot} \begin{equation} k(x, x') = \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)^\mathrm{degree}. \end{equation} which is used in classification of images. \item the hyperbolic tangent kernel \code{tanhdot} \begin{equation} k(x, x') = \tanh \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right) \end{equation} which is mainly used as a proxy for neural networks. \item the Bessel function of the first kind kernel \code{besseldot} \begin{equation} k(x, x') = \frac{\mathrm{Bessel}_{(\nu+1)}^n(\sigma \|x - x'\|)} {(\|x-x'\|)^{-n(\nu+1)}}. \end{equation} is a general purpose kernel and is typically used when no further prior knowledge is available and mainly popular in the Gaussian process community. \item the Laplace radial basis kernel \code{laplacedot} \begin{equation} k(x, x') = \exp(-\sigma \|x - x'\|) \end{equation} which is a general purpose kernel and is typically used when no further prior knowledge is available. \item the ANOVA radial basis kernel \code{anovadot} performs well in multidimensional regression problems \begin{equation} k(x, x') = \left(\sum_{k=1}^{n}\exp(-\sigma(x^k-{x'}^k)^2)\right)^{d} \end{equation} where $x^k$ is the $k$th component of $x$. \end{itemize} \subsection{Kernel utility methods} The package also includes methods for computing commonly used kernel expressions (e.g., the Gram matrix). These methods are written in such a way that they take functions (i.e., kernels) and matrices (i.e., vectors of patterns) as arguments. These can be either the kernel functions already included in \pkg{kernlab} or any other function implementing a valid dot product (taking two vector arguments and returning a scalar). In case one of the already implemented kernels is used, the function calls a vectorized implementation of the corresponding function. Moreover, in the case of symmetric matrices (e.g., the dot product matrix of a Support Vector Machine) they only require one argument rather than having to pass the same matrix twice (for rows and columns).
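As a minimal sketch of this flexibility (our own illustration), one can define a plain dot product function and hand it to the utility methods; here the function is given the class \code{"kernel"} so that the method dispatch accepts it, and, the result being symmetric, a single matrix argument suffices:

<<>>=
## a user-defined dot product: two vector arguments, scalar result
mydot <- function(x, y) sum(x * y)
class(mydot) <- "kernel"
x <- matrix(rnorm(30), 6, 5)
kernelMatrix(mydot, x)
@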
The computations for the kernels already available in the package are vectorized whenever possible which guarantees good performance and acceptable memory requirements. Users can define their own kernel by creating a function which takes two vectors as arguments (the data points) and returns a scalar (the dot product). This function can then be passed as an argument to the kernel utility methods. For a user defined kernel the dispatch mechanism calls a generic method implementation which calculates the expression by passing the kernel function through a pair of \code{for} loops. The kernel methods included are: \begin{description} \item[\code{kernelMatrix}] This is the most commonly used function. It computes $k(x, x')$, i.e., it computes the matrix $K$ where $K_{ij} = k(x_i, x_j)$ and $x$ is a \emph{row} vector. In particular, \begin{verbatim} K <- kernelMatrix(kernel, x) \end{verbatim} computes the matrix $K_{ij} = k(x_i, x_j)$ where the $x_i$ are the columns of $X$ and \begin{verbatim} K <- kernelMatrix(kernel, x1, x2) \end{verbatim} computes the matrix $K_{ij} = k(x1_i, x2_j)$. \item[\code{kernelFast}] This method is different to \code{kernelMatrix} for \code{rbfdot}, \code{besseldot}, and the \code{laplacedot} kernel, which are all RBF kernels. It is identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input. It is mainly used in kernel algorithms, where columns of the kernel matrix are computed per invocation. In these cases, evaluating the norm of each column-entry as it is done on a \code{kernelMatrix} invocation on an RBF kernel, over and over again would cause significant computational overhead. Its invocation is via \begin{verbatim} K = kernelFast(kernel, x1, x2, a) \end{verbatim} Here $a$ is a vector containing the squared norms of $x1$. \item[\code{kernelMult}] is a convenient way of computing kernel expansions. It returns the vector $f = (f(x_1), \dots, f(x_m))$ where \begin{equation} f(x_i) = \sum_{j=1}^{m} k(x_i, x_j) \alpha_j, \mbox{~hence~} f = K \alpha. \end{equation} The need for such a function arises from the fact that $K$ may sometimes be larger than the memory available. Therefore, it is convenient to compute $K$ only in stripes and discard the latter after the corresponding part of $K \alpha$ has been computed. The parameter \code{blocksize} determines the number of rows in the stripes. In particular, \begin{verbatim} f <- kernelMult(kernel, x, alpha) \end{verbatim} computes $f_i = \sum_{j=1}^m k(x_i, x_j) \alpha_j$ and \begin{verbatim} f <- kernelMult(kernel, x1, x2, alpha) \end{verbatim} computes $f_i = \sum_{j=1}^m k(x1_i, x2_j) \alpha_j$. \item[\code{kernelPol}] is a method very similar to \code{kernelMatrix} with the only difference that rather than computing $K_{ij} = k(x_i, x_j)$ it computes $K_{ij} = y_i y_j k(x_i, x_j)$. This means that \begin{verbatim} K <- kernelPol(kernel, x, y) \end{verbatim} computes the matrix $K_{ij} = y_i y_j k(x_i, x_j)$ where the $x_i$ are the columns of $x$ and $y_i$ are elements of the vector~$y$. Moreover, \begin{verbatim} K <- kernelPol(kernel, x1, x2, y1, y2) \end{verbatim} computes the matrix $K_{ij} = y1_i y2_j k(x1_i, x2_j)$. Both \code{x1} and \code{x2} may be matrices and \code{y1} and \code{y2} vectors.
\end{description} An example using these functions : <<>>= ## create a polynomial kernel function with degree 2 poly <- polydot(degree=2) ## create artificial data set x <- matrix(rnorm(60), 6, 10) y <- matrix(rnorm(40), 4, 10) ## compute kernel matrix kx <- kernelMatrix(poly, x) kxy <- kernelMatrix(poly, x, y) @ \section{Kernel methods} Providing a solid base for creating kernel-based methods is part of what we are trying to achieve with this package, the other being to provide a wider range of kernel-based methods in \proglang{R}. In the rest of the paper we present the kernel-based methods available in \pkg{kernlab}. All the methods in \pkg{kernlab} can be used with any of the kernels included in the package as well as with any valid user-defined kernel. User defined kernel functions can be passed to existing kernel-methods in the \code{kernel} argument. \subsection{Support vector machine} Support vector machines \citep{kernlab:Vapnik:1998} have gained prominence in the field of machine learning and pattern classification and regression. The solutions to classification and regression problems sought by kernel-based algorithms such as the SVM are linear functions in the feature space: \begin{equation} f(x) = w^\top \Phi(x) \end{equation} for some weight vector $w \in F$. The kernel trick can be exploited in this setting whenever the weight vector~$w$ can be expressed as a linear combination of the training points, $w = \sum_{i=1}^{n} \alpha_i \Phi(x_i)$, implying that $f$ can be written as \begin{equation} f(x) = \sum_{i=1}^{n}\alpha_i k(x_i, x) \end{equation} A very important issue that arises is that of choosing a kernel~$k$ for a given learning task. Intuitively, we wish to choose a kernel that induces the ``right'' metric in the space. Support Vector Machines choose a function $f$ that is linear in the feature space by optimizing some criterion over the sample. In the case of the 2-norm Soft Margin classification the optimization problem takes the form: \begin{eqnarray} \nonumber \mathrm{minimize} && t(w,\xi) = \frac{1}{2}{\|w\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\ \mbox{subject to~} && y_i ( \langle x_i , w \rangle +b ) \geq 1- \xi_i \qquad (i=1,\dots,m)\\ \nonumber && \xi_i \ge 0 \qquad (i=1,\dots, m) \end{eqnarray} Based on similar methodology, SVMs deal with the problem of novelty detection (or one class classification) and regression. \pkg{kernlab}'s implementation of support vector machines, \code{ksvm}, is based on the optimizers found in \pkg{bsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/bsvm}} \citep{kernlab:Hsu:2002} and \pkg{libsvm} \citep{kernlab:Chang+Lin:2001} which includes a very efficient version of Sequential Minimal Optimization (SMO). SMO decomposes the SVM Quadratic Problem (QP) without using any numerical QP optimization steps. Instead, it chooses to solve the smallest possible optimization problem involving two elements of $\alpha_i$ because they must obey one linear equality constraint. At every step, SMO chooses two $\alpha_i$ to jointly optimize and finds the optimal values for these $\alpha_i$ analytically, thus avoiding numerical QP optimization, and updates the SVM to reflect the new optimal values.
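The analytic update at the heart of SMO can be sketched in a few lines of \proglang{R}. The following schematic snippet (an illustration of the standard pairwise update, not \pkg{kernlab}'s actual \proglang{C++} optimizer) jointly optimizes a pair $(\alpha_i, \alpha_j)$ of the C-SVM dual, assuming the kernel matrix \code{K}, the labels \code{y} in $\{-1,1\}$ and the current prediction errors \code{E} are given:

<<>>=
## schematic two-variable SMO step (illustration only, assumes eta > 0)
smoStep <- function(alpha, i, j, K, y, C, E) {
  ## feasible interval for alpha_j under the equality constraint
  if (y[i] != y[j]) {
    L <- max(0, alpha[j] - alpha[i])
    H <- min(C, C + alpha[j] - alpha[i])
  } else {
    L <- max(0, alpha[i] + alpha[j] - C)
    H <- min(C, alpha[i] + alpha[j])
  }
  eta <- K[i, i] + K[j, j] - 2 * K[i, j]  # curvature along the pair
  aj <- alpha[j] + y[j] * (E[i] - E[j]) / eta
  aj <- min(max(aj, L), H)                # clip to the feasible interval
  ## adjust alpha_i so that sum(alpha * y) stays constant
  ai <- alpha[i] + y[i] * y[j] * (alpha[j] - aj)
  alpha[c(i, j)] <- c(ai, aj)
  alpha
}
@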
The SVM implementations available in \code{ksvm} include the C-SVM classification algorithm along with the $\nu$-SVM classification formulation which is equivalent to the former but has a more natural ($\nu$) model parameter taking values in $[0,1]$ and is proportional to the fraction of support vectors found in the data set and the training error. For classification problems which include more than two classes (multi-class) a one-against-one or pairwise classification method \citep{kernlab:Knerr:1990, kernlab:Kressel:1999} is used. This method constructs ${k \choose 2}$ classifiers where each one is trained on data from two classes. Prediction is done by voting where each classifier gives a prediction and the class which is predicted more often wins (``Max Wins''). This method has been shown to produce robust results when used with SVMs \citep{kernlab:Hsu2:2002}. Furthermore the \code{ksvm} implementation provides the ability to produce class probabilities as output instead of class labels. This is done by an improved implementation \citep{kernlab:Lin:2001} of Platt's posteriori probabilities \citep{kernlab:Platt:2000} where a sigmoid function \begin{equation} P(y=1\mid f) = \frac{1}{1+ e^{Af+B}} \end{equation} is fitted on the decision values~$f$ of the binary SVM classifiers; $A$ and $B$ are estimated by minimizing the negative log-likelihood function. To extend the class probabilities to the multi-class case, each binary classifier's class probability output is combined by the \code{couple} method which implements methods for combining class probabilities proposed in \citep{kernlab:Wu:2003}. In order to create a similar probability output for regression, following \cite{kernlab:Weng:2004}, we suppose that the SVM is trained on data from the model \begin{equation} y_i = f(x_i) + \delta_i \end{equation} where $f(x_i)$ is the underlying function and $\delta_i$ is independent and identically distributed random noise. Given test data $x$, the distribution of $y$ given $x$ allows one to draw probabilistic inferences about $y$, e.g. one can construct a predictive interval $\Phi = \Phi(x)$ such that $y \in \Phi$ with a certain probability. If $\hat{f}$ is the estimated (predicted) function of the SVM on new data then $\eta = \eta(x) = y - \hat{f}(x)$ is the prediction error and $y \in \Phi$ is equivalent to $\eta \in \Phi$. Empirical observation shows that the distribution of the residuals $\eta$ can be modeled both by a Gaussian and a Laplacian distribution with zero mean. In this implementation the Laplacian with zero mean is used: \begin{equation} p(z) = \frac{1}{2\sigma}e^{-\frac{|z|}{\sigma}} \end{equation} Assuming that the $\eta$ are independent, the scale parameter $\sigma$ is estimated by maximizing the likelihood. The data for the estimation is produced by a three-fold cross-validation. For the Laplace distribution the maximum likelihood estimate is: \begin{equation} \sigma = \frac{\sum_{i=1}^m|\eta_i|}{m} \end{equation} Another approach for multi-class classification supported by the \code{ksvm} function is the one proposed in \cite{kernlab:Crammer:2000}.
This algorithm works by solving a single optimization problem including the data from all classes: \begin{eqnarray} \nonumber \mathrm{minimize} && t(w_n,\xi) = \frac{1}{2}\sum_{n=1}^k{\|w_n\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\ \mbox{subject to~} && \langle x_i , w_{y_i} \rangle - \langle x_i , w_{n} \rangle \geq b_i^n - \xi_i \qquad (i=1,\dots,m) \\ \mbox{where} && b_i^n = 1 - \delta_{y_i,n} \end{eqnarray} where the decision function is \begin{equation} \mathrm{argmax}_{m=1,\dots,k} \langle x_i , w_{n} \rangle \end{equation} This optimization problem is solved by a decomposition method proposed in \cite{kernlab:Hsu:2002} where optimal working sets are found (that is, sets of $\alpha_i$ values which have a high probability of being non-zero). The QP sub-problems are then solved by a modified version of the \pkg{TRON}\footnote{\url{http://www-unix.mcs.anl.gov/~more/tron/}} \citep{kernlab:more:1999} optimization software. One-class classification or novelty detection \citep{kernlab:Williamson:1999, kernlab:Tax:1999}, where essentially an SVM detects outliers in a data set, is another algorithm supported by \code{ksvm}. SVM novelty detection works by creating a spherical decision boundary around a set of data points by a set of support vectors describing the sphere's boundary. The $\nu$ parameter is used to control the volume of the sphere and consequently the number of outliers found. Again, the value of $\nu$ represents the fraction of outliers found. Furthermore, $\epsilon$-SVM \citep{kernlab:Vapnik2:1995} and $\nu$-SVM \citep{kernlab:Smola1:2000} regression are also available. The problem of model selection is partially addressed by an empirical observation for the popular Gaussian RBF kernel \citep{kernlab:Caputo:2002}, where the optimal values of the hyper-parameter sigma are shown to lie in between the 0.1 and 0.9 quantile of the $\|x- x'\|$ statistics. The \code{sigest} function uses a sample of the training set to estimate the quantiles and returns a vector containing the values of the quantiles. Pretty much any value within this interval leads to good performance. An example for the \code{ksvm} function is shown below. <<>>= ## simple example using the promotergene data set data(promotergene) ## create test and training set tindex <- sample(1:dim(promotergene)[1],5) genetrain <- promotergene[-tindex, ] genetest <- promotergene[tindex,] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE) gene predict(gene, genetest) predict(gene, genetest, type="probabilities") @ \begin{figure} \centering <<>>= set.seed(123) x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) @ \caption{A contour plot of the SVM decision values for a toy binary classification problem using the \code{plot} function} \label{fig:ksvm Plot} \end{figure} \subsection{Relevance vector machine} The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM making predictions based on a function of the form \begin{equation} y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0 \end{equation} where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function.
\subsection{Relevance vector machine}
The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM, making predictions based on a function of the form
\begin{equation}
y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0
\end{equation}
where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function. It adopts a Bayesian approach to learning by introducing a prior over the weights $\alpha$,
\begin{equation}
p(\alpha, \beta) = \prod_{i=1}^m N(\alpha_i \mid 0 , \beta_i^{-1}) \mathrm{Gamma}(\beta_i\mid a_\beta , b_\beta)
\end{equation}
governed by a set of hyper-parameters $\beta$, one associated with each weight, whose most probable values are iteratively estimated from the data. Sparsity is achieved because in practice the posterior distributions of many of the weights are sharply peaked around zero. Furthermore, unlike the SVM classifier, the non-zero weights in the RVM are not associated with examples close to the decision boundary, but rather appear to represent ``prototypical'' examples. These examples are termed \emph{relevance vectors}.
\pkg{kernlab} currently has an implementation of the RVM based on a type~II maximum likelihood method which can be used for regression. The function returns an \proglang{S4} object containing the model parameters along with indexes for the relevance vectors and the kernel function and hyper-parameters used.
<<echo=TRUE>>=
x <- seq(-20, 20, 0.5)
y <- sin(x)/x + rnorm(81, sd = 0.03)
y[41] <- 1
@
<<echo=TRUE>>=
rvmm <- rvm(x, y, kernel = "rbfdot", kpar = list(sigma = 0.1))
rvmm
ytest <- predict(rvmm, x)
@
\begin{figure}
\centering
<<fig=TRUE>>=
plot(x, y, cex = 0.5)
lines(x, ytest, col = "red")
points(x[RVindex(rvmm)], y[RVindex(rvmm)], pch = 21)
@
\caption{Relevance vector regression on data points created by the $\mathrm{sinc}(x)$ function; relevance vectors are shown circled.}
\label{fig:RVM sigmoid}
\end{figure}
\subsection{Gaussian processes}
Gaussian processes \citep{kernlab:Williams:1995} are based on the ``prior'' assumption that adjacent observations should convey information about each other. In particular, it is assumed that the observed variables are normal, and that the coupling between them takes place by means of the covariance matrix of a normal distribution. Using the kernel matrix as the covariance matrix is a convenient way of extending Bayesian modeling of linear estimators to nonlinear situations. Furthermore it represents the counterpart of the ``kernel trick'' in methods minimizing the regularized risk.
For regression estimation we assume that rather than observing $t(x_i)$ we observe $y_i = t(x_i) + \xi_i$, where $\xi_i$ is assumed to be independent Gaussian distributed noise with zero mean. The posterior distribution is given by
\begin{equation}
p(\mathbf{y}\mid \mathbf{t}) = \left[ \prod_i p(y_i - t(x_i)) \right] \frac{1}{\sqrt{(2\pi)^m \det(K)}} \exp \left(-\frac{1}{2}\mathbf{t}^T K^{-1} \mathbf{t} \right)
\end{equation}
and after substituting $\mathbf{t} = K\mathbf{\alpha}$ and taking logarithms
\begin{equation}
\ln{p(\mathbf{\alpha} \mid \mathbf{y})} = - \frac{1}{2\sigma^2}\| \mathbf{y} - K \mathbf{\alpha} \|^2 -\frac{1}{2}\mathbf{\alpha}^T K \mathbf{\alpha} +c
\end{equation}
and maximizing $\ln{p(\mathbf{\alpha} \mid \mathbf{y})}$ for $\mathbf{\alpha}$ to obtain the maximum a posteriori approximation yields
\begin{equation}
\mathbf{\alpha} = (K + \sigma^2\mathbf{1})^{-1} \mathbf{y}
\end{equation}
Knowing $\mathbf{\alpha}$ allows for prediction of $y$ at a new location $x$ through $y = K(x,x_i)\mathbf{\alpha}$. In a similar fashion Gaussian processes can be used for classification.
\code{gausspr} is the function in \pkg{kernlab} implementing Gaussian processes for classification and regression.
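A minimal regression sketch on toy data follows; the data set and the value of \code{sigma} are illustrative only:
<<eval=FALSE>>=
## sketch: Gaussian process regression on a noisy sine curve
x <- matrix(seq(-5, 5, 0.2))
y <- sin(x) + rnorm(length(x), sd = 0.1)
gp <- gausspr(x, y, kernel = "rbfdot", kpar = list(sigma = 1))
yhat <- predict(gp, x)
@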
\subsection{Ranking}
The success of Google has vividly demonstrated the value of a good ranking algorithm in real world problems. \pkg{kernlab} includes a ranking algorithm based on work published in \cite{kernlab:Zhou:2003}. This algorithm exploits the geometric structure of the data, in contrast to the more naive approach which uses only the Euclidean distances or inner products of the data. Since real world data are usually highly structured, this algorithm should perform better than a simpler approach based on a Euclidean distance measure.
First, a weighted network is defined on the data and an authoritative score is assigned to every point. The query points act as source nodes that continually pump their scores to the remaining points via the weighted network, and the remaining points further spread the score to their neighbors. The spreading process is repeated until convergence, and the points are ranked according to the scores they received.
Suppose we are given a set of data points $X = \{x_1, \dots, x_{s}, x_{s+1}, \dots, x_{m}\}$ in $\mathbf{R}^n$, where the first $s$ points are the query points and the rest are the points to be ranked. The algorithm works by connecting the two nearest points iteratively until a connected graph $G = (X, E)$ is obtained, where $E$ is the set of edges. The affinity matrix $K$ is defined e.g.\ by $K_{ij} = \exp(-\sigma\|x_i - x_j \|^2)$ if there is an edge $e(i,j) \in E$, and $0$ otherwise (including the diagonal elements). The matrix is normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$, and
\begin{equation}
f(t+1) = \alpha Lf(t) + (1 - \alpha)y
\end{equation}
is iterated until convergence, where $\alpha$ is a parameter in $[0,1)$. The points are then ranked according to their final scores $f_{i}(t_f)$.
\pkg{kernlab} includes an \proglang{S4} method implementing the ranking algorithm. The algorithm can be used both with an edge-graph, where the structure of the data is taken into account, and without, which is equivalent to ranking the data by their distance in the projected space. A base-\proglang{R} sketch of the score-spreading iteration is shown after the figure below.
\begin{figure}
\centering
<<fig=TRUE>>=
data(spirals)
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2, ]
ranked <- ranking(ran, 54, kernel = "rbfdot",
                  kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
c <- 1:86
op <- par(mfrow = c(1, 2), pty = "s")
plot(ran)
plot(ran, cex = c[ranked[, 3]]/40)
@
\caption{The points on the left are ranked according to their similarity to the upper most left point. Points with a higher rank appear bigger. Instead of ranking the points on simple Euclidean distance, the structure of the data is recognized and all points on the upper structure are given a higher rank although further away in distance than points in the lower structure.}
\label{fig:Ranking}
\end{figure}
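The score-spreading iteration itself is easy to express directly. The following is a minimal sketch, assuming a precomputed affinity matrix \code{K} with zero diagonal (from a connected graph, so no row sum is zero) and a $0/1$ indicator vector \code{y} marking the query points; the function name and defaults are illustrative:
<<eval=FALSE>>=
## sketch of the iteration f(t+1) = alpha * L f(t) + (1 - alpha) * y
rankScores <- function(K, y, alpha = 0.9, iter = 100) {
  D <- diag(1 / sqrt(rowSums(K)))
  L <- D %*% K %*% D                  # L = D^{-1/2} K D^{-1/2}
  f <- y
  for (t in seq_len(iter))
    f <- alpha * L %*% f + (1 - alpha) * y
  drop(f)                             # final scores used for ranking
}
@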
\subsection{Online learning with kernels}
The \code{onlearn} function in \pkg{kernlab} implements the online kernel algorithms for classification, novelty detection and regression described in \cite{kernlab:Kivinen:2004}. In batch learning, it is typically assumed that all the examples are immediately available and are drawn independently from some distribution $P$. One natural measure of quality for some $f$ in that case is the expected risk
\begin{equation}
R[f,P] := E_{(x,y)\sim P}[l(f(x),y)]
\end{equation}
Since usually $P$ is unknown, a standard approach is to instead minimize the empirical risk
\begin{equation}
R_{\mathrm{emp}}[f] := \frac{1}{m}\sum_{t=1}^m l(f(x_t),y_t)
\end{equation}
Minimizing $R_{\mathrm{emp}}[f]$ may lead to overfitting (complex functions that fit well on the training data but do not generalize to unseen data). One way to avoid this is to penalize complex functions by instead minimizing the regularized risk
\begin{equation}
R_{\mathrm{reg}}[f,S] := R_{\mathrm{reg},\lambda}[f,S] := R_{\mathrm{emp}}[f] + \frac{\lambda}{2}\|f\|_{H}^2
\end{equation}
where $\lambda > 0$ and $\|f\|_{H} = {\langle f,f \rangle}_{H}^{\frac{1}{2}}$ does indeed measure the complexity of $f$ in a sensible way. The constant $\lambda$ needs to be chosen appropriately for each problem. Since in online learning one is interested in dealing with one example at a time, the definition of an instantaneous regularized risk on a single example is needed:
\begin{equation}
R_{\mathrm{inst}}[f,x,y] := R_{\mathrm{inst},\lambda}[f,x,y] := R_{\mathrm{reg},\lambda}[f,((x,y))]
\end{equation}
The implemented algorithms are classical stochastic gradient descent algorithms performing gradient descent on the instantaneous risk. The general form of the update rule is
\begin{equation}
f_{t+1} = f_t - \eta_t \partial_f R_{\mathrm{inst},\lambda}[f,x_t,y_t]|_{f=f_t}
\end{equation}
where $f_t \in H$, $\partial_f$ is shorthand for $\partial/\partial f$ (the gradient with respect to $f$), and $\eta_t > 0$ is the learning rate. Since the learning takes place in a \textit{reproducing kernel Hilbert space} $H$, the kernel $k$ used has the property $\langle f,k(x,\cdotp)\rangle_H = f(x)$ and therefore
\begin{equation}
\partial_f l(f(x_t),y_t) = l'(f(x_t),y_t)k(x_t,\cdotp)
\end{equation}
where $l'(z,y) := \partial_z l(z,y)$. Since $\partial_f\|f\|_H^2 = 2f$, the update becomes
\begin{equation}
f_{t+1} = (1 - \eta_t\lambda)f_t -\eta_t l'(f_t(x_t),y_t)k(x_t,\cdotp)
\end{equation}
The \code{onlearn} function implements the online learning algorithm for regression, classification and novelty detection. The online nature of the algorithm requires a different approach to the use of the function. An object is used to store the state of the algorithm at each iteration $t$; this object is passed to the function as an argument and is returned at each iteration $t+1$ containing the model parameter state at this step. An empty object of class \code{onlearn} is initialized using the \code{inlearn} function. A sketch of one update step in coefficient form is shown after the example below.
<<echo=TRUE>>=
## create toy data set
x <- rbind(matrix(rnorm(90), , 2), matrix(rnorm(90) + 3, , 2))
y <- matrix(c(rep(1, 45), rep(-1, 45)), , 1)
## initialize onlearn object
on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2),
              type = "classification")
ind <- sample(1:90, 90)
## learn one data point at a time
for (i in ind)
  on <- onlearn(on, x[i, ], y[i], nu = 0.03, lambda = 0.1)
sign(predict(on, x))
@
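In the kernel expansion $f_t = \sum_i \alpha_i k(x_i,\cdotp)$, the update above amounts to shrinking all existing coefficients by $(1-\eta_t\lambda)$ and appending at most one new coefficient. The following is a minimal sketch for the hinge loss $l(z,y) = \max(0, 1-yz)$, where $l'(z,y) = -y$ if $yz < 1$ and $0$ otherwise; the function and variable names are illustrative and not part of the \code{onlearn} API:
<<eval=FALSE>>=
## one stochastic update in coefficient form, hinge loss assumed;
## kvec holds the kernel values k(x_1, x_t), ..., k(x_{t-1}, x_t)
onlineStep <- function(alpha, kvec, y_t, eta, lambda) {
  f_t <- sum(alpha * kvec)                # current prediction f_t(x_t)
  alpha <- (1 - eta * lambda) * alpha     # decay of the old coefficients
  c(alpha, if (y_t * f_t < 1) eta * y_t else 0)  # -eta * l'(f_t(x_t), y_t)
}
@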
\subsection{Spectral clustering}
Spectral clustering \citep{kernlab:Ng:2001} is a promising alternative to common clustering algorithms which has emerged recently. In this method one uses the top eigenvectors of a matrix created by some similarity measure to cluster the data. Similarly to the ranking algorithm, an affinity matrix is created from the data as
\begin{equation}
K_{ij}=\exp(-\sigma\|x_i - x_j \|^2)
\end{equation}
and normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$. Then the top $k$ eigenvectors (where $k$ is the number of clusters to be found) of the affinity matrix are used to form an $n \times k$ matrix $Y$ whose rows are normalized again to unit length. Treating each row of this matrix as a data point, \code{kmeans} is finally used to cluster the points.
\pkg{kernlab} includes an \proglang{S4} method called \code{specc} implementing this algorithm which can be used through a formula interface or a matrix interface. The \proglang{S4} object returned by the method extends the class ``vector'' and contains the assigned cluster for each point along with information on the centers, size and within-cluster sum of squares for each cluster. In case a Gaussian RBF kernel is being used, a model selection process can be used to determine the optimal value of the $\sigma$ hyper-parameter. For a good value of $\sigma$ the values of $Y$ tend to cluster tightly, and it turns out that the within-cluster sum of squares is a good indicator for the ``quality'' of the $\sigma$ parameter found; we therefore iterate through a range of $\sigma$ values to find an optimal one.
\begin{figure}
\centering
<<fig=TRUE>>=
data(spirals)
sc <- specc(spirals, centers = 2)
plot(spirals, pch = (23 - 2*sc))
@
\caption{Clustering the two spirals data set with \code{specc}}
\label{fig:Spectral Clustering}
\end{figure}
\subsection{Kernel principal components analysis}
Principal component analysis (PCA) is a powerful technique for extracting structure from possibly high-dimensional datasets. PCA is an orthogonal transformation of the coordinate system in which we describe the data. The new coordinates by which we represent the data are called principal components. Kernel PCA \citep{kernlab:Schoelkopf:1998} performs a nonlinear transformation of the coordinate system by finding principal components which are nonlinearly related to the input variables. Given a set of centered observations $x_k$, $k=1,\dots,M$, $x_k \in \mathbf{R}^N$, PCA diagonalizes the covariance matrix $C = \frac{1}{M}\sum_{j=1}^Mx_jx_{j}^T$ by solving the eigenvalue problem $\lambda\mathbf{v}=C\mathbf{v}$. The same computation can be done in a dot product space $F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$. Assuming that we deal with centered data and use the covariance matrix in $F$,
\begin{equation}
\hat{C}=\frac{1}{M}\sum_{j=1}^M \Phi(x_j)\Phi(x_j)^T
\end{equation}
the kernel principal components are then computed by taking the eigenvectors of the centered kernel matrix $K_{ij} = \langle \Phi(x_i),\Phi(x_j) \rangle$.
\code{kpca}, the function implementing KPCA in \pkg{kernlab}, can be used both with a formula and a matrix interface, and returns an \proglang{S4} object of class \code{kpca} containing the principal components and the corresponding eigenvalues, along with the projection of the training data on the new coordinate system. Furthermore, the \code{predict} function can be used to embed new data points into the new coordinate system, as sketched after the figure below.
\begin{figure}
\centering
<<fig=TRUE>>=
data(spam)
train <- sample(1:dim(spam)[1], 400)
kpc <- kpca(~., data = spam[train, -58], kernel = "rbfdot",
            kpar = list(sigma = 0.001), features = 2)
kpcv <- pcv(kpc)
plot(rotated(kpc), col = as.integer(spam[train, 58]),
     xlab = "1st Principal Component", ylab = "2nd Principal Component")
@
\caption{Projection of the spam data on two kernel principal components using an RBF kernel}
\label{fig:KPCA}
\end{figure}
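A minimal sketch, reusing the \code{kpc} object and \code{train} index from the figure above; the size of the held-out sample is arbitrary:
<<eval=FALSE>>=
## sketch: embed 100 held-out spam messages into the space
## spanned by the two kernel principal components computed above
test <- sample((1:dim(spam)[1])[-train], 100)
emb <- predict(kpc, spam[test, -58])
@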
\subsection{Kernel feature analysis}
Whilst KPCA leads to very good results, there are nevertheless some issues to be addressed. First, the standard version of KPCA is computationally expensive: the algorithm scales as $O(m^3)$. Second, the resulting feature extractors are given as a dense expansion in terms of the training patterns. Sparse solutions are often achieved in supervised learning settings by using an $l_1$ penalty on the expansion coefficients. An algorithm can be derived using the same approach in feature extraction, requiring only $n$ basis functions to compute the first $n$ features. Kernel feature analysis \citep{kernlab:Olvi:2000} is computationally simple and scales approximately one order of magnitude better on large data sets than standard KPCA. Choosing $\Omega [f] = \sum_{i=1}^m |\alpha_i |$, this yields
\begin{equation}
F_{LP} = \{ \mathbf{w} \mid \mathbf{w} = \sum_{i=1}^m \alpha_i \Phi(x_i) \mathrm{~with~} \sum_{i=1}^m |\alpha_i | \leq 1 \}
\end{equation}
This setting leads to the first ``principal vector'' in the $l_1$ context,
\begin{equation}
\mathbf{\nu}^1 = \mathrm{argmax}_{\mathbf{\nu} \in F_{LP}} \frac{1}{m} \sum_{i=1}^m \langle \mathbf{\nu},\mathbf{\Phi}(x_i) - \frac{1}{m}\sum_{j=1}^m\mathbf{\Phi}(x_j) \rangle^2
\end{equation}
Subsequent ``principal vectors'' can be defined by enforcing optimality with respect to the remaining orthogonal subspaces. Due to the $l_1$ constraint the solution has the favorable property of being sparse in terms of the coefficients $\alpha_i$.
The function \code{kfa} in \pkg{kernlab} implements kernel feature analysis by using a projection pursuit technique on a sample of the data. Results are then returned in an \proglang{S4} object.
\begin{figure}
\centering
<<fig=TRUE>>=
data(promotergene)
f <- kfa(~., data = promotergene, features = 2, kernel = "rbfdot",
         kpar = list(sigma = 0.013))
plot(predict(f, promotergene), col = as.numeric(promotergene[, 1]),
     xlab = "1st Feature", ylab = "2nd Feature")
@
\caption{Projection of the promotergene data on two features using an RBF kernel}
\label{fig:KFA}
\end{figure}
\subsection{Kernel canonical correlation analysis}
Canonical correlation analysis (CCA) is concerned with describing the linear relations between variables. If we have two data sets $x_1$ and $x_2$, then classical CCA attempts to find linear combinations of the variables which give the maximum correlation between the combinations. I.e., if
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}\mathbf{x_1} = \sum_j w_{1j} x_{1j} \\
&& y_2 = \mathbf{w_2}\mathbf{x_2} = \sum_j w_{2j} x_{2j}
\end{eqnarray*}
one wishes to find those values of $\mathbf{w_1}$ and $\mathbf{w_2}$ which maximize the correlation between $y_1$ and $y_2$. Similar to the KPCA algorithm, CCA can be extended and used in a dot product space~$F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$, as
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}\mathbf{\Phi(x_1)} = \sum_j w_{1j} \Phi(x_{1j}) \\
&& y_2 = \mathbf{w_2}\mathbf{\Phi(x_2)} = \sum_j w_{2j} \Phi(x_{2j})
\end{eqnarray*}
Following \cite{kernlab:kuss:2003}, the \pkg{kernlab} implementation of KCCA projects the data vectors on a new coordinate system using KPCA and uses linear CCA to retrieve the correlation coefficients. The \code{kcca} method in \pkg{kernlab} returns an \proglang{S4} object containing the correlation coefficients for each data set and the corresponding correlation along with the kernel used.
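A minimal sketch of the matrix interface on hypothetical toy data with two strongly coupled views; the data, the kernel parameter, and the use of the \code{kcor} accessor for the canonical correlations are illustrative:
<<eval=FALSE>>=
## sketch: two coupled one-dimensional views
x <- matrix(rnorm(100))
y <- x + matrix(rnorm(100, sd = 0.1))
kcc <- kcca(x, y, kernel = "rbfdot", kpar = list(sigma = 0.1))
kcor(kcc)   # canonical correlation coefficients
@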
\subsection{Interior point code quadratic optimizer}
In many kernel based algorithms, learning implies the minimization of some risk function. Typically we have to deal with quadratic or general convex problems for support vector machines of the type
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & f(x) \\
\mbox{subject to~} & c_i(x) \leq 0 \mbox{~for all~} i \in [n].
\end{array}
\end{equation}
$f$ and $c_i$ are convex functions and $n \in \mathbf{N}$. \pkg{kernlab} provides the \proglang{S4} method \code{ipop} implementing an optimizer of the interior point family \citep{kernlab:Vanderbei:1999} which solves the quadratic programming problem
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & c^\top x+\frac{1}{2}x^\top H x \\
\mbox{subject to~} & b \leq Ax \leq b + r\\
& l \leq x \leq u \\
\end{array}
\end{equation}
This optimizer can be used in regression, classification, and novelty detection in SVMs.
\subsection{Incomplete Cholesky decomposition}
When dealing with kernel based algorithms, calculating a full kernel matrix should be avoided, since it is already an $O(N^2)$ operation. Fortunately, the fact that kernel matrices are positive semidefinite is a strong constraint and good approximations can be found with small computational cost. The Cholesky decomposition factorizes a positive semidefinite $N \times N$ matrix $K$ as $K=ZZ^T$, where $Z$ is an upper triangular $N \times N$ matrix. Exploiting the fact that kernel matrices are usually of low rank, an \emph{incomplete Cholesky decomposition} \citep{kernlab:Wright:1999} finds a matrix $\tilde{Z}$ of size $N \times M$ where $M\ll N$ such that the norm of $K-\tilde{Z}\tilde{Z}^T$ is smaller than a given tolerance $\theta$. The main difference between the incomplete Cholesky decomposition and the standard Cholesky decomposition is that pivots which are below a certain threshold are simply skipped. If $L$ is the number of skipped pivots, we obtain a $\tilde{Z}$ with only $M = N - L$ columns. The algorithm works by picking a column from $K$ to be added, by maximizing a lower bound on the reduction of the error of the approximation. \pkg{kernlab} has an implementation of an incomplete Cholesky factorization called \code{inchol} which computes the decomposed matrix $\tilde{Z}$ from the original data for any given kernel, without the need to compute a full kernel matrix beforehand. This has the advantage that no full kernel matrix has to be stored in memory.
\section{Conclusions}
In this paper we described \pkg{kernlab}, a flexible and extensible kernel methods package for \proglang{R}, with existing modern kernel algorithms along with tools for constructing new kernel based algorithms. It provides a unified framework for using and creating kernel-based algorithms in \proglang{R} while using all of \proglang{R}'s modern facilities, like \proglang{S4} classes and namespaces. Our aim for the future is to extend the package and add more kernel-based methods as well as kernel relevant tools. Sources and binaries for the latest version of \pkg{kernlab} are available at CRAN\footnote{\url{http://CRAN.R-project.org}} under the GNU General Public License. A shorter version of this introduction to the \proglang{R} package \pkg{kernlab} is published as \cite{kernlab:Karatzoglou+Smola+Hornik:2004} in the \emph{Journal of Statistical Software}.
\bibliography{jss}
\end{document}
kernlab/R/0000755000175100001440000000000013561524074012116 5ustar hornikuserskernlab/R/sigest.R0000644000175100001440000000465612676465031013545 0ustar hornikusers## sigma estimation for RBF kernels ## author: alexandros setGeneric("sigest", function(x, ...) standardGeneric("sigest")) setMethod("sigest",signature(x="formula"), function (x, data=NULL, frac = 0.5, na.action = na.omit, scaled = TRUE){ call <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) ## m$...
<- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m$frac <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- sigest(x, scaled = scaled, frac = frac, na.action = na.action) return (ret) }) setMethod("sigest",signature(x="matrix"), function (x, frac = 0.5, scaled = TRUE, na.action = na.omit) { x <- na.action(x) if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp } } m <- dim(x)[1] n <- floor(frac*m) index <- sample(1:m, n, replace = TRUE) index2 <- sample(1:m, n, replace = TRUE) temp <- x[index,, drop=FALSE] - x[index2,,drop=FALSE] dist <- rowSums(temp^2) srange <- 1/quantile(dist[dist!=0],probs=c(0.9,0.5,0.1)) ## ds <- sort(dist[dist!=0]) ## sl <- ds[ceiling(0.2*length(ds))] ## su <- ds[ceiling(0.8*length(ds))] ## srange <- c(1/su,1/median(ds), 1/sl) ## names(srange) <- NULL return(srange) }) kernlab/R/ksvm.R0000644000175100001440000034644313271622514013233 0ustar hornikusers## Support Vector Machines ## author : alexandros karatzoglou ## updated : 08.02.06 setGeneric("ksvm", function(x, ...) standardGeneric("ksvm")) setMethod("ksvm",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 ## no intercept x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- ksvm(x, y, scaled = scaled, ...) kcall(ret) <- cl attr(Terms,"intercept") <- 0 ## no intercept terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("ksvm",signature(x="vector"), function(x, ...) { x <- t(t(x)) ret <- ksvm(x, ...) return(ret) }) setMethod("ksvm",signature(x="matrix"), function (x, y = NULL, scaled = TRUE, type = NULL, kernel = "rbfdot", kpar = "automatic", C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ... ,subset ,na.action = na.omit) { ## Comment out sparse code, future impl. 
will be based on "Matrix" ## sparse <- inherits(x, "matrix.csr") ## if (sparse) { ## if (!require(SparseM)) ## stop("Need SparseM package for handling of sparse structures!") ## } sparse <- FALSE if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(ksvm(as.kernelMatrix(x), y = y, type = type, C = C, nu = nu, epsilon = epsilon, prob.model = prob.model, class.weights = class.weights, cross = cross, fit = fit, cache = cache, tol = tol, shrinking = shrinking, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } ## subsetting and na-handling for matrices ret <- new("ksvm") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } n.action(ret) <- na.action if (is.null(type)) type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr" if(!is.null(type)) type(ret) <- match.arg(type,c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc", "one-svc", "eps-svr", "eps-bsvr", "nu-svr")) ## ## scaling, subsetting, and NA handling ## if (sparse) { ## scale <- rep(FALSE, ncol(x)) ## if(!is.null(y)) na.fail(y) ## x <- t(t(x)) ## make shure that col-indices are sorted ## } x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. 
Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] if (is.numeric(y)&&(type(ret)!="C-svc"&&type(ret)!="nu-svc"&&type(ret)!="C-bsvc"&&type(ret)!="spoc-svc"&&type(ret)!="kbb-svc")) { y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) } } } ncols <- ncol(x) m <- nrows <- nrow(x) if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) #cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (!is(y,"vector") && !is.factor (y) & is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.") if(!(type(ret)=="one-svc")) if(is(y,"vector") | is(y,"factor") ) ym <- length(y) else if(is(y,"matrix")) ym <- dim(y)[1] else stop("y must be a matrix or a vector") if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.") if(nu > 1|| nu <0) stop("nu must be between 0 an 1.") weightlabels <- NULL nweights <- 0 weight <- 0 wl <- 0 ## in case of classification: transform factors into integers if (type(ret) == "one-svc") # one class classification --> set dummy y <- 1 else if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (!is.null(class.weights)) { weightlabels <- match (names(class.weights),lev(ret)) if (any(is.na(weightlabels))) stop ("At least one level name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize nclass(ret) <- length (unique(y)) p <- 0 K <- 0 svindex <- problem <- NULL sigma <- 0.1 degree <- offset <- scale <- 1 switch(is(kernel)[1], "rbfkernel" = { sigma <- kpar(kernel)$sigma ktype <- 2 }, "tanhkernel" = { sigma <- kpar(kernel)$scale offset <- kpar(kernel)$offset ktype <- 3 }, "polykernel" = { degree <- kpar(kernel)$degree sigma <- kpar(kernel)$scale offset <- kpar(kernel)$offset ktype <- 1 }, "vanillakernel" = { ktype <- 0 }, "laplacekernel" = { ktype <- 5 sigma <- kpar(kernel)$sigma }, "besselkernel" = { ktype <- 6 sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$order offset <- kpar(kernel)$degree }, "anovakernel" = { ktype <- 7 sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree }, "splinekernel" = { ktype <- 8 }, { ktype <- 4 } ) prior(ret) <- list(NULL) ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ## prepare the data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) 
nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) if(ktype==4) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]) resv <- .Call(smo_optim, as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])), as.integer(li+lj), as.integer(ncol(x)), as.double(yd), as.double(K), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha svind <- tmpres > 0 alpha(ret)[p] <- list(tmpres[svind]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop=FALSE][svind, ,drop=FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in a vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) if(ktype==4) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]) resv <- .Call(smo_optim, as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])), as.integer(li+lj), as.integer(ncol(x)), as.double(yd), as.double(K), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. 
as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] svind <- tmpres != 0 alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[svind]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop=FALSE][svind,,drop=FALSE]) ##save the indexes from all the SV in a vector (use unique!) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in a vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) if(ktype==4) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]) resv <- .Call(tron_optim, as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])), as.integer(li+lj), as.integer(ncol(x)), as.double(yd), as.double(K), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix svind <- resv[-(li+lj+1)][reind] > 0 alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][svind]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop = FALSE][svind,,drop = FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) ## store obj. 
values in vector obj(ret) <- c(obj(ret), resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) xd <- matrix(x[yd$ix,],nrow=dim(x)[1]) count <- 0 if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call(tron_optim, as.double(t(xd)), as.integer(nrow(xd)), as.integer(ncol(xd)), as.double(rep(yd$x-1,2)), as.double(K), as.integer(if (sparse) xd@ia else 0), as.integer(if (sparse) xd@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xd) + 1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which(alpha(ret)[,x]!=0)) xmatrix(ret) <- x obj(ret) <- resv[(nclass(ret)*nrow(xd) + 1)] names(alphaindex(ret)) <- lev(ret) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- weightlabels * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix,,drop=FALSE] count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call(tron_optim, as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(yd$x-1), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-(nrow(x)*(nclass(ret)-1)+1)],nrow(x))[reind,,drop=FALSE] xmatrix(ret) <- x<- x[reind,,drop=FALSE] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[(nrow(x)*(nclass(ret)-1)+1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call(smo_optim, as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(matrix(rep(1,m))), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call(smo_optim, as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex, ,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call(smo_optim, as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call(tron_optim, as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) tmpres <- resv[-(m + 1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m + 1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- kernel ymatrix(ret) <- y SVindex(ret) <- sort(unique(svindex),method="quick") nSV(ret) <- length(unique(svindex)) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NULL if(any(scaled)) scaling(ret) <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale) if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr"){ if (!is.null(scaling(ret)$y.scale)){ scal <- scaling(ret)$y.scale$"scaled:scale" fitted(ret) <- fitted(ret) # / scaling(ret)$y.scale$"scaled:scale" + scaling(ret)$y.scale$"scaled:center" } else scal <- 1 error(ret) <- drop(crossprod(fitted(ret) - y)/m) } } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(x[cind,],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, x[vgr[[i]],,drop=FALSE]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(x[cind,],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]],, drop=FALSE]) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(x[cind,],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]],,drop=FALSE]) if (!is.null(scaling(ret)$y.scale)) scal <- scaling(ret)$y.scale$"scaled:scale" else scal <- 1 cerror <- drop((scal^2)*crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][cind,],yd[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE ,cache = cache, prob.model = FALSE) else cret <- ksvm(x[c(indexes[[i]],indexes[[j]]), 
,drop=FALSE][cind,],as.factor(lev(ret)[y[c(indexes[[i]],indexes[[j]])][cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model = FALSE) yres <- c(yres, yd[vgr[[k]]]) pres <- rbind(pres, predict(cret, x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][vgr[[k]],],type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) pres <- NULL for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(x[cind,],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]],]) if (!is.null(scaling(ret)$y.scale)) cres <- cres * scaling(ret)$y.scale$"scaled:scale" + scaling(ret)$y.scale$"scaled:center" pres <- rbind(pres, cres) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } return(ret) }) ## kernelmatrix interface setMethod("ksvm",signature(x="kernelMatrix"), function (x, y = NULL, type = NULL, C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ...) { sparse <- FALSE ## subsetting and na-handling for matrices ret <- new("ksvm") if (is.null(type)) type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr" if(!is.null(type)) type(ret) <- match.arg(type,c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc", "one-svc", "eps-svr", "eps-bsvr", "nu-svr")) ncols <- ncol(x) m <- nrows <- nrow(x) if (!is(y,"vector") && !is.factor (y) & !is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.") if(!(type(ret)=="one-svc")) if(is(y,"vector") | is(y,"factor")) ym <- length(y) else if(is(y,"matrix")) ym <- dim(y)[1] else stop("y must be a matrix or a vector") if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.") if(nu > 1|| nu <0) stop("nu must be between 0 an 1.") weightlabels <- NULL nweights <- 0 weight <- 0 wl <- 0 ## in case of classification: transform factors into integers if (type(ret) == "one-svc") # one class classification --> set dummy y <- 1 else if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (!is.null(class.weights)) { if (is.null(names (class.weights))) stop ("Weights have to be specified along with their according level names !") weightlabels <- match (names(class.weights),lev(ret)) if (any(is.na(weightlabels))) stop ("At least one level name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- problem <- NULL sigma <- 0.1 degree <- offset <- scale <- 1 ktype <- 4 prior(ret) <- list(NULL) ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) 
{ yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(1,li+lj,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(as.vector(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE])), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha svind <- tmpres > 0 alpha(ret)[p] <- list(tmpres[svind]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[svind, svind,drop=FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) ##xd <- matrix(0,(li+lj),(li+lj)) ##xdi <- 1:(li+lj) <= li ##xd[xdi,rep(TRUE,li+lj)] <- x[indexes[[i]],c(indexes[[i]],indexes[[j]])] ##xd[xdi == FALSE,rep(TRUE,li+lj)] <- x[indexes[[j]],c(indexes[[i]],indexes[[j]])] if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(1,li+lj,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[tmpres != 0]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres != 0]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[tmpres != 0,tmpres != 0,drop=FALSE]) ##save the indexes from all the SV in a vector (use unique!) 
svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(rnorm(li+lj),li+lj,1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][resv[-(li+lj+1)][reind] > 0]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][resv[-(li+lj+1)][reind] > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][resv[-(li+lj+1)][reind] > 0]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[resv > 0 ,resv > 0,drop = FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) ## store objective function values vector obj(ret) <- c(obj(ret), resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- matrix(x[yd$ix,yd$ix],nrow=dim(x)[1]) count <- 0 xdd <- matrix(1,m,1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(rep(yd$x-1,2)), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xdd)+1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which(alpha(ret)[,x]!=0)) ## xmatrix(ret) <- x names(alphaindex(ret)) <- lev(ret) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 obj(ret) <- resv[(nclass(ret)*nrow(xdd)+1)] param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- matrix(x[yd$ix,yd$ix],nrow=dim(x)[1]) count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) xdd <- matrix(1,m,1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd$x-1), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-(nrow(x)*(nclass(ret)-1) + 1)],nrow(x))[reind,,drop=FALSE] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[(nrow(x)*(nclass(ret)-1) + 1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { xdd <- matrix(1,m,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(matrix(rep(1,m))), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
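## the remaining slots mirror the classification calls above: an unused
## class-weight vector and count, the kernel cache size (in MB), the
## termination tolerance, and the shrinking flag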
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { xdd <- matrix(1,m,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex ,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { xdd <- matrix(1,m,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { xdd <- matrix(1,m,1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) tmpres <- resv[-(m+1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m+1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- " Kernel matrix used as input." ymatrix(ret) <- y SVindex(ret) <- unique(sort(svindex,method="quick")) nSV(ret) <- length(unique(svindex)) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, as.kernelMatrix(x[,SVindex(ret),drop = FALSE])) else NULL if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr <- split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(as.kernelMatrix(x[cind,cind]), as.factor(lev(ret)[y[cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(as.kernelMatrix(x[cind,cind]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret), C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind]),yd[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache, prob.model=FALSE) else cret <- ksvm(as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind]), as.factor(lev(ret)[y[c(indexes[[i]],indexes[[j]])][cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, 
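## three-fold scheme for Platt scaling: each fold's held-out decision values
## are stacked into `pres` and, together with the matching labels in `yres`,
## fed to the sigmoid fit in .probPlatt() below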
as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][vgr[[k]], cind,drop = FALSE][,SVindex(cret),drop = FALSE]),type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) pres <- NULL for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret), C=C, nu=nu, epsilon=epsilon, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE])) pres <- rbind(pres,predict(cret, as.kernelMatrix(x[vgr[[i]],cind , drop = FALSE][,SVindex(cret) ,drop = FALSE]),type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } return(ret) }) .classAgreement <- function (tab) { n <- sum(tab) if (!is.null(dimnames(tab))) { lev <- intersect(colnames(tab), rownames(tab)) p0 <- sum(diag(tab[lev, lev])) / n } else { m <- min(dim(tab)) p0 <- sum(diag(tab[1:m, 1:m])) / n } return(p0) } ## List Interface setMethod("ksvm",signature(x="list"), function (x, y = NULL, type = NULL, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ... ,na.action = na.omit) { ret <- new("ksvm") if (is.null(y)) x <- na.action(x) n.action(ret) <- na.action sparse <- FALSE if (is.null(type)) type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr" if(!is.null(type)) type(ret) <- match.arg(type,c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc", "one-svc", "eps-svr", "eps-bsvr", "nu-svr")) m <- length(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot")) if(is.character(kpar)) if(kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot" || kernel == "rbfdot" || kernel == "laplacedot" ) { stop("List interface supports only the stringdot kernel.") } } if(is(kernel,"kernel") & !is(kernel,"stringkernel")) stop("List interface supports only the stringdot kernel.") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (!is(y,"vector") && !is.factor(y) & !is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.") if(!(type(ret)=="one-svc")) if(is(y,"vector") | is(y,"factor")) ym <- length(y) else if(is(y,"matrix")) ym <- dim(y)[1] else stop("y must be a matrix or a vector") if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.") if(nu > 1|| nu <0) stop("nu must be between 0 an 1.") weightlabels <- NULL nweights <- 0 weight <- 0 wl <- 0 ## in case of classification: transform factors into integers if (type(ret) == "one-svc") # one class classification --> set dummy y <- 1 else if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (!is.null(class.weights)) { if (is.null(names (class.weights))) stop ("Weights have to be specified along with their according level names !") weightlabels <- match (names(class.weights),lev(ret)) if (any(is.na(weightlabels))) stop ("At least one level 
name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize if (type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") nclass(ret) <- length (unique(y)) p <- 0 K <- 0 svindex <- problem <- NULL ktype <- 4 prior(ret) <- list(NULL) sigma <- 0.1 degree <- offset <- scale <- 1 ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha alpha(ret)[p] <- list(tmpres[tmpres > 0]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][tmpres > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres>0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][tmpres > 0]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) obj(ret) <- c(obj(ret),resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweights <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[tmpres != 0]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres!=0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][tmpres != 0]) ##save the indexes from all the SV in a vector (use unique!)
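## Unlike the C-svc branch above, which stores positive alphas and reapplies
## the sign from yd, here alpha and coef are stored identically: the nu-svc
## solver apparently returns the signed coefficients alpha_i * y_i directly.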
svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweights <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking)) reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][resv[-(li+lj+1)][reind] > 0]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][resv[-(li+lj+1)][reind] > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][resv[-(li+lj+1)][reind] > 0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][resv[-(li+lj+1)][reind] > 0]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) obj(ret) <- c(obj(ret),resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really useful?)
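## `weightedC` gives every class its own box constraint, e.g. with
## class.weights = c(a = 1, b = 2) and C = 10 the per-class upper bounds passed
## to the optimizer would be 10 and 20 (illustrative values only).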
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix] count <- 0 K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(rep(yd$x-1,2)), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xdd) + 1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(1:nclass(ret), function(x) which(alpha(ret)[,x]!=0)) names(alphaindex(ret)) <- lev(ret) xmatrix(ret) <- x svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 obj(ret) <- resv[(nclass(ret)*nrow(xdd) + 1)] param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- weightlabels * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix] count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd$x-1), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. 
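## the tail of the argument list is shared with the other tron_optim calls:
## a dummy weight vector and count, the per-class bounds in weightedC, the
## cache size (in MB), the tolerance, the working-set size (qpsize) and the
## shrinking flag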
as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-((nclass(ret)-1)*length(x)+1)],length(x))[reind,,drop=FALSE] xmatrix(ret) <- x<- x[reind] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[((nclass(ret)-1)*length(x)+1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(matrix(rep(1,m))), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres !=0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call(smo_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking)) tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call(tron_optim, as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking)) tmpres <- resv[-(m+1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m+1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- kernel ymatrix(ret) <- y SVindex(ret) <- unique(svindex) nSV(ret) <- length(unique(svindex)) if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") nclass(ret) <- m if(type(ret)=="one-svc") nclass(ret) <- 1 if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) { if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") & nclass(ret) > 2) predict(ret, x) else if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc"||type(ret)=="spoc-bsvc"||type(ret)=="kbb-bsvc")) predict(ret,as.kernelMatrix(K[reind,reind][,SVindex(ret), drop=FALSE])) else predict(ret,as.kernelMatrix(K[,SVindex(ret), drop=FALSE])) } else NULL if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(!((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") & nclass(ret) > 2)) { if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc"||type(ret)=="spoc-bsvc"||type(ret)=="kbb-bsvc")) K <- as.kernelMatrix(K[reind,reind]) if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr <- split(sample(1:dim(K)[1],dim(K)[1]),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(as.kernelMatrix(K[cind,cind]),as.factor(lev(ret)[y[cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(as.kernelMatrix(K[cind,cind]), type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type=type(ret), C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) cret <- 
ksvm(as.kernelMatrix(as.kernelMatrix(K[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind])), yd[cind], type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, as.kernelMatrix(K[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][vgr[[k]], cind,drop = FALSE][,SVindex(cret),drop = FALSE]),type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) pres <- NULL for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type=type(ret), C=C, nu=nu, epsilon=epsilon, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE])) pres <- rbind(pres,predict(cret, as.kernelMatrix(K[vgr[[i]],cind , drop = FALSE][,SVindex(cret) ,drop = FALSE]),type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } } else{ if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(x[cind],y[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(x[cind],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, x[vgr[[i]]]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(x[cind],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]]]) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m)/cross + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(x[c(indexes[[i]], indexes[[j]])][cind],yd[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache, prob.model=FALSE) else cret <- ksvm(x[c(indexes[[i]], indexes[[j]])][cind],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar 
= NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, x[c(indexes[[i]], indexes[[j]])][vgr[[k]]],type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(x[cind],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]]]) pres <- rbind(pres,predict(cret, x[vgr[[i]]],type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } } return(ret) }) ##**************************************************************# ## predict for matrix, data.frame input setMethod("predict", signature(object = "ksvm"), function (object, newdata, type = "response", coupler = "minpair") { type <- match.arg(type,c("response","probabilities","votes","decision")) if (missing(newdata) && type=="response" & !is.null(fitted(object))) return(fitted(object)) else if(missing(newdata)) stop("Missing data !") if(!is(newdata,"list")){ if (!is.null(terms(object)) & !is(newdata,"kernelMatrix")) { if(!is.matrix(newdata)) newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object)) } else newdata <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata) newnrows <- nrow(newdata) newncols <- ncol(newdata) if(!is(newdata,"kernelMatrix") && !is.null(xmatrix(object))){ if(is(xmatrix(object),"list") && is(xmatrix(object)[[1]],"matrix")) oldco <- ncol(xmatrix(object)[[1]]) if(is(xmatrix(object),"matrix")) oldco <- ncol(xmatrix(object)) if (oldco != newncols) stop ("test vector does not match model !") } } else newnrows <- length(newdata) p <- 0 if (is.list(scaling(object))) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale") if(type == "response" || type =="decision" || type=="votes") { if(type(object)=="C-svc"||type(object)=="nu-svc"||type(object)=="C-bsvc") { predres <- 1:newnrows if(type=="decision") votematrix <- matrix(0,nclass(object)*(nclass(object)-1)/2,newnrows) else votematrix <- matrix(0,nclass(object),newnrows) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 if(is(newdata,"kernelMatrix")) ret <- newdata[,which(SVindex(object)%in%alphaindex(object)[[p]]), drop=FALSE] %*% coef(object)[[p]] - b(object)[p] else ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[[p]],coef(object)[[p]]) - b(object)[p] if(type=="decision") votematrix[p,] <- ret else{ votematrix[i,ret<0] <- votematrix[i,ret<0] + 1 votematrix[j,ret>0] <- votematrix[j,ret>0] + 1 } } } if(type == "decision") predres <- t(votematrix) else predres <- sapply(predres, function(x) which.max(votematrix[,x])) } if(type(object) == "spoc-svc") { predres <- 1:newnrows votematrix <- matrix(0,nclass(object),newnrows) for(i in 1:nclass(object)){ if(is(newdata,"kernelMatrix")) votematrix[i,] <- newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% coef(object)[[i]] else if (is(newdata,"list")) votematrix[i,] <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],coef(object)[[i]]) 
else votematrix[i,] <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],coef(object)[[i]]) } predres <- sapply(predres, function(x) which.max(votematrix[,x])) } if(type(object) == "kbb-svc") { predres <- 1:newnrows votematrix <- matrix(0,nclass(object),newnrows) A <- rowSums(alpha(object)) for(i in 1:nclass(object)) { for(k in (1:i)[-i]) if(is(newdata,"kernelMatrix")) votematrix[k,] <- votematrix[k,] - (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% alpha(object)[,k][alphaindex(object)[[i]]] + sum(alpha(object)[,k][alphaindex(object)[[i]]])) else if (is(newdata,"list")) votematrix[k,] <- votematrix[k,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],alpha(object)[,k][alphaindex(object)[[i]]]) + sum(alpha(object)[,k][alphaindex(object)[[i]]])) else votematrix[k,] <- votematrix[k,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],alpha(object)[,k][alphaindex(object)[[i]]]) + sum(alpha(object)[,k][alphaindex(object)[[i]]])) if(is(newdata,"kernelMatrix")) votematrix[i,] <- votematrix[i,] + (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% A[alphaindex(object)[[i]]] + sum(A[alphaindex(object)[[i]]])) else if (is(newdata,"list")) votematrix[i,] <- votematrix[i,] + (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],A[alphaindex(object)[[i]]]) + sum(A[alphaindex(object)[[i]]])) else votematrix[i,] <- votematrix[i,] + (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],A[alphaindex(object)[[i]]]) + sum(A[alphaindex(object)[[i]]])) if(i <= (nclass(object)-1)) for(kk in i:(nclass(object)-1)) if(is(newdata,"kernelMatrix")) votematrix[kk+1,] <- votematrix[kk+1,] - (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% alpha(object)[,kk][alphaindex(object)[[i]]] + sum(alpha(object)[,kk][alphaindex(object)[[i]]])) else if (is(newdata,"list")) votematrix[kk+1,] <- votematrix[kk+1,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],alpha(object)[,kk][alphaindex(object)[[i]]]) + sum(alpha(object)[,kk][alphaindex(object)[[i]]])) else votematrix[kk+1,] <- votematrix[kk+1,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],alpha(object)[,kk][alphaindex(object)[[i]]]) + sum(alpha(object)[,kk][alphaindex(object)[[i]]])) } predres <- sapply(predres, function(x) which.max(votematrix[,x])) } } if(type == "probabilities") { if(is.null(prob.model(object)[[1]])) stop("ksvm object contains no probability model. 
Make sure you set the paramater prob.model in ksvm during training.") if(type(object)=="C-svc"||type(object)=="nu-svc"||type(object)=="C-bsvc") { binprob <- matrix(0, newnrows, nclass(object)*(nclass(object) - 1)/2) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 if(is(newdata,"kernelMatrix")) binprob[,p] <- 1 - .SigmoidPredict(as.vector(newdata[,which(SVindex(object)%in%alphaindex(object)[[p]]), drop=FALSE] %*% coef(object)[[p]] - b(object)[p]), prob.model(object)[[p]]$A, prob.model(object)[[p]]$B) else binprob[,p] <- 1 - .SigmoidPredict(as.vector(kernelMult(kernelf(object),newdata,xmatrix(object)[[p]],coef(object)[[p]]) - b(object)[p]), prob.model(object)[[p]]$A, prob.model(object)[[p]]$B) } } multiprob <- couple(binprob, coupler = coupler) } else stop("probability estimates only supported for C-svc, C-bsvc and nu-svc") } if(type(object) == "one-svc") { if(is(newdata,"kernelMatrix")) ret <- newdata %*% coef(object) - b(object) else ret <- kernelMult(kernelf(object),newdata,xmatrix(object),coef(object)) - b(object) ##one-class-classification: return TRUE/FALSE (probabilities ?) if(type=="decision") return(ret) else { ret[ret>0]<-1 return(ret == 1) } } else { if(type(object)=="eps-svr"||type(object)=="nu-svr"||type(object)=="eps-bsvr") { if(is(newdata,"kernelMatrix")) predres <- newdata %*% coef(object) - b(object) else predres <- kernelMult(kernelf(object),newdata,xmatrix(object),coef(object)) - b(object) } else { ##classification & votes : return votematrix if(type == "votes") return(votematrix) ##classification & probabilities : return probability matrix if(type == "probabilities") { colnames(multiprob) <- lev(object) return(multiprob) } if(is.numeric(lev(object)) && type == "response") return(lev(object)[predres]) if (is.character(lev(object)) && type!="decision") { ##classification & type response: return factors if(type == "response") return(factor (lev(object)[predres], levels = lev(object))) } } } if (!is.null(scaling(object)$y.scale) & !is(newdata,"kernelMatrix") & !is(newdata,"list")) ## return raw values, possibly scaled back return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center") else ##else: return raw values return(predres) }) #****************************************************************************************# setMethod("show","ksvm", function(object){ cat("Support Vector Machine object of class \"ksvm\"","\n") cat("\n") cat(paste("SV type:", type(object))) switch(type(object), "C-svc" = cat(paste(" (classification)", "\n")), "nu-svc" = cat(paste(" (classification)", "\n")), "C-bsvc" = cat(paste(" (classification)", "\n")), "one-svc" = cat(paste(" (novelty detection)", "\n")), "spoc-svc" = cat(paste(" (classification)", "\n")), "kbb-svc" = cat(paste(" (classification)", "\n")), "eps-svr" = cat(paste(" (regression)","\n")), "nu-svr" = cat(paste(" (regression)","\n")) ) switch(type(object), "C-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")), "nu-svc" = cat(paste(" parameter : nu =", param(object)$nu, "\n")), "C-bsvc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")), "one-svc" = cat(paste(" parameter : nu =", param(object)$nu, "\n")), "spoc-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")), "kbb-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")), "eps-svr" = cat(paste(" parameter : epsilon =",param(object)$epsilon, " cost C =", param(object)$C,"\n")), "nu-svr" = cat(paste(" parameter : epsilon =", param(object)$epsilon, " nu =", 
param(object)$nu,"\n")) ) cat("\n") show(kernelf(object)) cat(paste("\nNumber of Support Vectors :", nSV(object),"\n")) cat("\nObjective Function Value :", round(obj(object),4),"\n") ## if(type(object)=="C-svc" || type(object) == "nu-svc") ## cat(paste("Margin width :",margin(object),"\n")) if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),6),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),6),"\n") if(!is.null(prob.model(object)[[1]])&&(type(object)=="eps-svr" ||type(object)=="nu-svr"||type(object)=="eps-bsvr")) cat("Laplace distr. width :",round(prob.model(object)[[1]],6),"\n") if(!is.null(prob.model(object)[[1]]) & (type(object) == "C-svc"| type(object) == "nu-svc"| type(object) == "C-bsvc")) cat("Probability model included.","\n") ##train error & loss }) setMethod("plot", signature(x = "ksvm", y = "missing"), function(x, data = NULL, grid = 50, slice = list(), ...) { if (type(x) =="C-svc" || type(x) == "nu-svc") { if(nclass(x) > 2) stop("plot function only supports binary classification") if (!is.null(terms(x))&&!is.null(data)) { if(!is.matrix(data)) sub <- model.matrix(delete.response(terms(x)), as.data.frame(data), na.action = n.action(x)) } else if(!is.null(data)) sub <- as.matrix(data) else sub <- xmatrix(x)[[1]] ## sub <- sub[,!colnames(xmatrix(x)[[1]])%in%names(slice)] xr <- seq(min(sub[,2]), max(sub[,2]), length = grid) yr <- seq(min(sub[,1]), max(sub[,1]), length = grid) sc <- 0 # if(is.null(data)) # { # sc <- 1 # data <- xmatrix(x)[[1]] # } if(is.data.frame(data) || !is.null(terms(x))){ lis <- c(list(yr), list(xr), slice) names(lis)[1:2] <- setdiff(colnames(sub),names(slice)) new <- expand.grid(lis)[,labels(terms(x))] } else new <- expand.grid(xr,yr) if(sc== 1) scaling(x) <- NULL preds <- predict(x, new ,type = "decision") if(is.null(terms(x))) xylb <- colnames(sub) else xylb <- names(lis) lvl <- 37 mymax <- max(abs(preds)) mylevels <- pretty(c(0, mymax), 15) nl <- length(mylevels)-2 mycols <- c(hcl(0, 100 * (nl:0/nl)^1.3, 90 - 40 *(nl:0/nl)^1.3), rev(hcl(260, 100 * (nl:0/nl)^1.3, 90 - 40 *(nl:0/nl)^1.3))) mylevels <- c(-rev(mylevels[-1]), mylevels) index <- max(which(mylevels < min(preds))):min(which(mylevels > max(preds))) mycols <- mycols[index] mylevels <- mylevels[index] #FIXME# previously the plot code assumed that the y values are either #FIXME# -1 or 1, but this is not generally true. If generated from a #FIXME# factor, they are typically 1 and 2. Maybe ymatrix should be #FIXME# changed? ymat <- ymatrix(x) ymean <- mean(unique(ymat)) filled.contour(xr, yr, matrix(as.numeric(preds), nrow = length(xr), byrow = TRUE), col = mycols, levels = mylevels, plot.axes = { axis(1) axis(2) if(!is.null(data)){ points(sub[-SVindex(x),2], sub[-SVindex(x),1], pch = ifelse(ymat[-SVindex(x)] < ymean, 2, 1)) points(sub[SVindex(x),2], sub[SVindex(x),1], pch = ifelse(ymat[SVindex(x)] < ymean, 17, 16))} else{ ## points(sub[-SVindex(x),], pch = ifelse(ymat[-SVindex(x)] < ymean, 2, 1)) points(sub, pch = ifelse(ymat[SVindex(x)] < ymean, 17, 16)) }}, nlevels = lvl, plot.title = title(main = "SVM classification plot", xlab = xylb[2], ylab = xylb[1]), ... 
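## any further arguments supplied to plot() are forwarded to filled.contour()
## through the ... above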
) } else { stop("Only plots of classification ksvm objects supported") }) setGeneric(".probPlatt", function(deci, yres) standardGeneric(".probPlatt")) setMethod(".probPlatt",signature(deci="ANY"), function(deci,yres) { if (is.matrix(deci)) deci <- as.vector(deci) if (!is.vector(deci)) stop("input should be matrix or vector") yres <- as.vector(yres) ## Create label and count priors boolabel <- yres >= 0 prior1 <- sum(boolabel) m <- length(yres) prior0 <- m - prior1 ## set parameters (should be on the interface I guess) maxiter <- 100 minstep <- 1e-10 sigma <- 1e-3 eps <- 1e-5 ## Construct target support hiTarget <- (prior1 + 1)/(prior1 + 2) loTarget <- 1/(prior0 + 2) length <- prior1 + prior0 t <- rep(loTarget, m) t[boolabel] <- hiTarget ##Initial Point & Initial Fun Value A <- 0 B <- log((prior0 + 1)/(prior1 + 1)) fval <- 0 fApB <- deci*A + B bindex <- fApB >= 0 p <- q <- rep(0,m) fval <- sum(t[bindex]*fApB[bindex] + log(1 + exp(-fApB[bindex]))) fval <- fval + sum((t[!bindex] - 1)*fApB[!bindex] + log(1+exp(fApB[!bindex]))) for (it in 1:maxiter) { h11 <- h22 <- sigma h21 <- g1 <- g2 <- 0 fApB <- deci*A + B bindex <- fApB >= 0 p[bindex] <- exp(-fApB[bindex])/(1 + exp(-fApB[bindex])) q[bindex] <- 1/(1+exp(-fApB[bindex])) bindex <- fApB < 0 p[bindex] <- 1/(1 + exp(fApB[bindex])) q[bindex] <- exp(fApB[bindex])/(1 + exp(fApB[bindex])) d2 <- p*q h11 <- h11 + sum(d2*deci^2) h22 <- h22 + sum(d2) h21 <- h21 + sum(deci*d2) d1 <- t - p g1 <- g1 + sum(deci*d1) g2 <- g2 + sum(d1) ## Stopping Criteria if (abs(g1) < eps && abs(g2) < eps) break ## Finding Newton Direction -inv(t(H))%*%g det <- h11*h22 - h21^2 dA <- -(h22*g1 - h21*g2) / det dB <- -(-h21*g1 + h11*g2) / det gd <- g1*dA + g2*dB ## Line Search stepsize <- 1 while(stepsize >= minstep) { newA <- A + stepsize * dA newB <- B + stepsize * dB ## New function value newf <- 0 fApB <- deci * newA + newB bindex <- fApB >= 0 newf <- sum(t[bindex] * fApB[bindex] + log(1 + exp(-fApB[bindex]))) newf <- newf + sum((t[!bindex] - 1)*fApB[!bindex] + log(1 + exp(fApB[!bindex]))) ## Check decrease if (newf < (fval + 0.0001 * stepsize * gd)) { A <- newA B <- newB fval <- newf break } else stepsize <- stepsize/2 } if (stepsize < minstep) { cat("line search fails", A, B, g1, g2, dA, dB, gd) ret <- .SigmoidPredict(deci, A, B) return(ret) } } if(it >= maxiter -1) cat("maximum number of iterations reached",g1,g2) ret <- list(A=A, B=B) return(ret) }) ## Sigmoid predict function .SigmoidPredict <- function(deci, A, B) { fApB <- deci*A +B k <- length(deci) ret <- rep(0,k) bindex <- fApB >= 0 ret[bindex] <- exp(-fApB[bindex])/(1 + exp(-fApB[bindex])) ret[!bindex] <- 1/(1 + exp(fApB[!bindex])) return(ret) } kernlab/R/kernels.R0000644000175100001440000026556713271622147013717 0ustar hornikusers## kernel functions ## Functions for computing a kernel value, matrix, matrix-vector ## product and quadratic form ## ## author : alexandros karatzoglou ## Define the kernel objects, ## functions with an additional slot for the kernel parameter list. ## kernel functions take two vector arguments and return a scalar (dot product) rbfdot<- function(sigma=1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") return(exp(sigma*(2*crossprod(x,y) - crossprod(x) - crossprod(y)))) # sigma/2 or sigma ??
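## the expression above relies on the identity
## 2*<x,y> - <x,x> - <y,y> = -||x - y||^2, so it computes
## exp(-sigma * ||x - y||^2); a quick sanity check (illustrative, kept as a
## comment so nothing runs at package load):
##   x <- c(1, 2); y <- c(0, 2)
##   exp(1 * (2*crossprod(x, y) - crossprod(x) - crossprod(y)))  # exp(-1)
##   exp(-1 * sum((x - y)^2))                                    # exp(-1), identical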
} } return(new("rbfkernel",.Data=rval,kpar=list(sigma=sigma))) } setClass("rbfkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) laplacedot<- function(sigma=1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") return(exp(-sigma*sqrt(-(round(2*crossprod(x,y) - crossprod(x) - crossprod(y),9))))) } } return(new("laplacekernel",.Data=rval,kpar=list(sigma=sigma))) } setClass("laplacekernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) besseldot<- function(sigma = 1, order = 1, degree = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") lim <- 1/(gamma(order+1)*2^(order)) bkt <- sigma*sqrt(-(2*crossprod(x,y) - crossprod(x) - crossprod(y))) if(bkt < 10e-5) res <- lim else res <- besselJ(bkt,order)*(bkt^(-order)) return((res/lim)^degree) } } return(new("besselkernel",.Data=rval,kpar=list(sigma=sigma ,order = order ,degree = degree))) } setClass("besselkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) anovadot<- function(sigma = 1, degree = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") res <- sum(exp(- sigma * (x - y)^2)) return((res)^degree) } } return(new("anovakernel",.Data=rval,kpar=list(sigma=sigma ,degree = degree))) } setClass("anovakernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) splinedot<- function() { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") minv <- pmin(x,y) res <- 1 + x*y*(1+minv) - ((x+y)/2)*minv^2 + (minv^3)/3 fres <- prod(res) return(fres) } } return(new("splinekernel",.Data=rval,kpar=list())) } setClass("splinekernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) fourierdot <- function(sigma = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") res <- (1 - sigma^2)/2*(1 - 2*sigma*cos(x - y) + sigma^2) fres <- prod(res) return(fres) } } return(new("fourierkernel",.Data=rval,kpar=list())) } setClass("fourierkernel",prototype=structure(.Data=function(){},kpar=list(sigma = 1)),contains=c("kernel")) tanhdot <- function(scale = 1, offset = 1) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ 
tanh(scale*crossprod(x)+offset) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") tanh(scale*crossprod(x,y)+offset) } } return(new("tanhkernel",.Data=rval,kpar=list(scale=scale,offset=offset))) } setClass("tanhkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) setClass("polykernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) polydot <- function(degree = 1, scale = 1, offset = 1) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ (scale*crossprod(x)+offset)^degree } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") (scale*crossprod(x,y)+offset)^degree } } return(new("polykernel",.Data=rval,kpar=list(degree=degree,scale=scale,offset=offset))) } setClass("vanillakernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) vanilladot <- function( ) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ crossprod(x) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") crossprod(x,y) } } return(new("vanillakernel",.Data=rval,kpar=list())) } setClass("stringkernel",prototype=structure(.Data=function(){},kpar=list(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE)),contains=c("kernel")) stringdot <- function(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE) { type <- match.arg(type,c("sequence","string","fullstring","exponential","constant","spectrum", "boundrange")) ## need to do this to set the length parameters if(type == "spectrum" | type == "boundrange") lambda <- length switch(type, "sequence" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call(subsequencek, as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda))) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call(subsequencek, as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda))) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(subsequencek, as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda)) / sqrt(.Call(subsequencek, as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda)) * .Call(subsequencek, as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda)))) } }, "exponential" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(2), as.double(lambda))) } if 
(is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(2), as.double(lambda)) / sqrt(.Call(stringtv, as.character(x), as.character(x), as.integer(1), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(2), as.double(lambda)) * .Call(stringtv, as.character(y), as.character(y), as.integer(1), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(2), as.double(lambda)))) } }, "constant" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(1), as.double(lambda))) } if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(1), as.double(lambda)) / sqrt(.Call(stringtv, as.character(x), as.character(x), as.integer(1), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(1), as.double(lambda)) * .Call(stringtv, as.character(y), as.character(y), as.integer(1), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(1), as.double(lambda)))) } }, "spectrum" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") n <- nchar(x) m <- nchar(y) if(n < length | m < length){ warning("String length smaller than length parameter value") return(0)} if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(n), as.integer(m), as.integer(3), as.double(length))) } if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(n), as.integer(m), as.integer(3), as.double(length)) / sqrt(.Call(stringtv, as.character(x), as.character(x), as.integer(1), as.integer(n), as.integer(n), as.integer(3), as.double(lambda)) * .Call(stringtv, as.character(y), as.character(y), as.integer(1), as.integer(m), as.integer(m), as.integer(3), as.double(length)))) } }, "boundrange" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(4), as.double(lambda))) } if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(stringtv, as.character(x), as.character(y), as.integer(1), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(4), as.double(lambda)) / sqrt(.Call(stringtv, as.character(x), as.character(x), as.integer(1), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(4), as.double(lambda)) * .Call(stringtv, as.character(y), as.character(y), 
as.integer(1), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(4), as.double(lambda)))) } }, "string" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call(substringk, as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda))) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call(substringk, as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda))) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(substringk, as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda)) / sqrt(.Call(substringk, as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda)) * .Call(substringk, as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda)))) } }, "fullstring" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call(fullsubstringk, as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda))) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call(fullsubstringk, as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda))) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call(fullsubstringk, as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda)) / sqrt(.Call(fullsubstringk, as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda)) * .Call(fullsubstringk, as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda)))) } }) return(new("stringkernel",.Data=rval,kpar=list(length=length, lambda =lambda, type = type, normalized = normalized))) } ## show method for kernel functions setMethod("show",signature(object="kernel"), function(object) { switch(class(object), "rbfkernel" = cat(paste("Gaussian Radial Basis kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"\n")), "laplacekernel" = cat(paste("Laplace kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"\n")), "besselkernel" = cat(paste("Bessel kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"order = ",kpar(object)$order, "degree = ", kpar(object)$degree,"\n")), "anovakernel" = cat(paste("Anova RBF kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma, "degree = ", kpar(object)$degree,"\n")), "tanhkernel" = cat(paste("Hyperbolic Tangent kernel function.", "\n","Hyperparameters :","scale = ", kpar(object)$scale," offset = ", kpar(object)$offset,"\n")), "polykernel" = cat(paste("Polynomial kernel function.", "\n","Hyperparameters :","degree = ",kpar(object)$degree," scale = ", kpar(object)$scale," offset = ", kpar(object)$offset,"\n")), "vanillakernel" = cat(paste("Linear 
(vanilla) kernel function.", "\n")),

         "splinekernel" = cat(paste("Spline kernel function.", "\n")),

         "stringkernel" = {
           if(kpar(object)$type =="spectrum" | kpar(object)$type =="boundrange")
             cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :","sub-sequence/string length = ",kpar(object)$length, "\n"))
           else if(kpar(object)$type =="exponential" | kpar(object)$type =="constant")
             cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :"," lambda = ", kpar(object)$lambda, "\n"))
           else
             cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :","sub-sequence/string length = ",kpar(object)$length," lambda = ", kpar(object)$lambda, "\n"))
           if(kpar(object)$normalized == TRUE) cat(" Normalized","\n")
           if(kpar(object)$normalized == FALSE) cat(" Not Normalized","\n")}
         )
})

## create accessor function as in "S4 Classes in 15 pages more or less", well..
if (!isGeneric("kpar")){
  if (is.function(kpar))
    fun <- kpar
  else fun <- function(object) standardGeneric("kpar")
  setGeneric("kpar",fun)
}
setMethod("kpar","kernel", function(object) object@kpar)

## Functions that return useful kernel calculations (kernel matrix etc.)

## kernelMatrix takes two or three arguments; this default evaluates the
## kernel function on every pair of rows, while the methods below replace
## it with vectorized computations for specific kernel classes
kernelMatrix <- function(kernel, x, y=NULL)
{
  if(is(x,"vector")) x <- as.matrix(x)
  if(is(y,"vector")) y <- as.matrix(y)
  if(!is(x,"matrix")) stop("x must be a matrix")
  if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix")
  n <- nrow(x)
  res1 <- matrix(rep(0,n*n), ncol = n)
  if(is.null(y)){
    ## fill the upper triangle only, then symmetrize
    for(i in 1:n) {
      for(j in i:n) {
        res1[i,j] <- kernel(x[i,],x[j,])
      }
    }
    res1 <- res1 + t(res1)
    diag(res1) <- diag(res1)/2
  }
  if (is(y,"matrix")){
    m <- dim(y)[1]
    res1 <- matrix(0,dim(x)[1],dim(y)[1])
    for(i in 1:n) {
      for(j in 1:m) {
        res1[i,j] <- kernel(x[i,],y[j,])
      }
    }
  }
  return(as.kernelMatrix(res1))
}
setGeneric("kernelMatrix",function(kernel, x, y = NULL) standardGeneric("kernelMatrix"))

kernelMatrix.rbfkernel <- function(kernel, x, y = NULL)
{
  if(is(x,"vector")) x <- as.matrix(x)
  if(is(y,"vector")) y <- as.matrix(y)
  if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector")
  sigma = kpar(kernel)$sigma
  n <- dim(x)[1]
  dota <- rowSums(x*x)/2
  if (is(x,"matrix") && is.null(y)){
    res <- crossprod(t(x))
    for (i in 1:n)
      res[i,]<- exp(2*sigma*(res[i,] - dota - rep(dota[i],n)))
    return(as.kernelMatrix(res))
  }
  if (is(x,"matrix") && is(y,"matrix")){
    if (!(dim(x)[2]==dim(y)[2]))
      stop("matrices must have the same number of columns")
    m <- dim(y)[1]
    dotb <- rowSums(y*y)/2
    res <- x%*%t(y)
    for( i in 1:m)
      res[,i]<- exp(2*sigma*(res[,i] - dota - rep(dotb[i],n)))
    return(as.kernelMatrix(res))
  }
}
setMethod("kernelMatrix",signature(kernel="rbfkernel"),kernelMatrix.rbfkernel)

kernelMatrix.laplacekernel <- function(kernel, x, y = NULL)
{
  if(is(x,"vector")) x <- as.matrix(x)
  if(is(y,"vector")) y <- as.matrix(y)
  if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector")
  sigma = kpar(kernel)$sigma
  n <- dim(x)[1]
  dota <- rowSums(x*x)/2
  if (is(x,"matrix") && is.null(y)){
    res <- crossprod(t(x))
    for (i in 1:n)
      res[i,]<- exp(-sigma*sqrt(round(-2*(res[i,] - dota - rep(dota[i],n)),9)))
    return(as.kernelMatrix(res))
  }
  if (is(x,"matrix") && is(y,"matrix")){
    if (!(dim(x)[2]==dim(y)[2]))
      stop("matrices must have the same number of columns")
    m <- dim(y)[1]
    dotb <- rowSums(y*y)/2
    res <- x%*%t(y)
    for( i in 1:m)
      res[,i]<- exp(-sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)))
    return(as.kernelMatrix(res))
  }
}
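## Illustrative sketch (not run) of the kernelMatrix interface defined
## above; the sigma value and the random data are invented for the example:
## k <- rbfdot(sigma = 0.05)
## X <- matrix(rnorm(40), 10, 4)
## K <- kernelMatrix(k, X)              # 10 x 10, uses the vectorized method
## K2 <- kernelMatrix(k, X, X[1:3, ])   # 10 x 3 cross-kernel matrix
## all.equal(K[1, 2], drop(k(X[1, ], X[2, ])))  # agrees with pairwise calls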
setMethod("kernelMatrix",signature(kernel="laplacekernel"),kernelMatrix.laplacekernel) kernelMatrix.besselkernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma nu = kpar(kernel)$order ni = kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu+1)*2^(nu)) dota <- rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n){ xx <- sigma*sqrt(round(-2*(res[i,] - dota - rep(dota[i],n)),9)) res[i,] <- besselJ(xx,nu)*(xx^(-nu)) res[i,which(xx<10e-5)] <- lim } return(as.kernelMatrix((res/lim)^ni)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){ xx <- sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim } return(as.kernelMatrix((res/lim)^ni)) } } setMethod("kernelMatrix",signature(kernel="besselkernel"),kernelMatrix.besselkernel) kernelMatrix.anovakernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma degree = kpar(kernel)$degree n <- dim(x)[1] if (is(x,"matrix") && is.null(y)){ a <- matrix(0, dim(x)[2], n) res <- matrix(0, n ,n) for (i in 1:n) { a[rep(TRUE,dim(x)[2]), rep(TRUE,n)] <- x[i,] res[i,]<- colSums(exp( - sigma*(a - t(x))^2))^degree } return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) for( i in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,m)] <- x[i,] res[i,]<- colSums(exp( - sigma*(b - t(y))^2))^degree } return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="anovakernel"),kernelMatrix.anovakernel) kernelMatrix.polykernel <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") scale = kpar(kernel)$scale offset = kpar(kernel)$offset degree = kpar(kernel)$degree if (is(x,"matrix") && is.null(y)) { res <- (scale*crossprod(t(x))+offset)^degree return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- (scale*crossprod(t(x),t(y)) + offset)^degree return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="polykernel"),kernelMatrix.polykernel) kernelMatrix.vanilla <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- crossprod(t(x),t(y)) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="vanillakernel"),kernelMatrix.vanilla) kernelMatrix.tanhkernel <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if (is(x,"matrix") && is.null(y)){ scale = kpar(kernel)$scale offset = kpar(kernel)$offset res <- tanh(scale*crossprod(t(x)) + offset) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of 
columns") res <- tanh(scale*crossprod(t(x),t(y)) + offset) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="tanhkernel"),kernelMatrix.tanhkernel) kernelMatrix.splinekernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma degree = kpar(kernel)$degree n <- dim(x)[1] if (is(x,"matrix") && is.null(y)){ a <- matrix(0, dim(x)[2], n) res <- matrix(0, n ,n) x <- t(x) for (i in 1:n) { dr <- x + x[,i] dp <- x * x[,i] dm <- pmin(x,x[,i]) res[i,] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) x <- t(x) y <- t(y) for( i in 1:n) { dr <- y + x[,i] dp <- y * x[,i] dm <- pmin(y,x[,i]) res[i,] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="splinekernel"),kernelMatrix.splinekernel) kernelMatrix.stringkernel <- function(kernel, x, y=NULL) { n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) normalized = kpar(kernel)$normalized if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { resdiag <- rep(0,n) if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) ## y is null if(is.null(y)){ if(normalized == TRUE){ ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- rep(1,n) } else{ for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag } } if (!is.null(y)){ m <- length(y) res1 <- matrix(0,n,m) resdiag1 <- rep(0,m) if(normalized == TRUE){ for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } } else{ for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[[i]],y[[j]]) } } } } return(as.kernelMatrix(res1)) } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, "spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(sktype==3 &(any(nchar(x) < kpar(kernel)$length)|any(nchar(x) < kpar(kernel)$length))) stop("spectral kernel does not accept strings shorter than the length parameter") if(is(x,"list")) x <- unlist(x) if(is(y,"list")) y <- unlist(y) x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if(is.null(y)) ret <- matrix(0, length(x),length(x)) else ret <- matrix(0,length(x),length(y)) if(is.null(y)){ for(i in 1:length(x)) ret[i,i:length(x)] <- .Call(stringtv, as.character(x[i]), as.character(x[i:length(x)]), as.integer(length(x) - i + 1), as.integer(nchar(x[i])), as.integer(nchar(x[i:length(x)])), as.integer(sktype), as.double(kpar(kernel)$lambda)) ret <- ret + t(ret) diag(ret) <- diag(ret)/2 } else for(i in 1:length(x)) ret[i,] <- .Call(stringtv, 
as.character(x[i]), as.character(y), as.integer(length(y)), as.integer(nchar(x[i])), as.integer(nchar(y)), as.integer(sktype), as.double(kpar(kernel)$lambda)) if(normalized == TRUE){ if(is.null(y)) ret <- t((1/sqrt(diag(ret)))*t(ret*(1/sqrt(diag(ret))))) else{ norm1 <- rep(0,length(x)) norm2 <- rep(0,length(y)) for( i in 1:length(x)) norm1[i] <- .Call(stringtv, as.character(x[i]), as.character(x[i]), as.integer(1), as.integer(nchar(x[i])), as.integer(nchar(x[i])), as.integer(sktype), as.double(kpar(kernel)$lambda)) for( i in 1:length(y)) norm2[i] <- .Call(stringtv, as.character(y[i]), as.character(y[i]), as.integer(1), as.integer(nchar(y[i])), as.integer(nchar(y[i])), as.integer(sktype), as.double(kpar(kernel)$lambda)) ret <- t((1/sqrt(norm2))*t(ret*(1/sqrt(norm1)))) } } } return(as.kernelMatrix(ret)) } setMethod("kernelMatrix",signature(kernel="stringkernel"),kernelMatrix.stringkernel) ## kernelMult computes kernel matrix - vector product ## function computing * z ( %*% z) kernelMult <- function(kernel, x, y=NULL, z, blocksize = 128) { # if(is.function(kernel)) ker <- deparse(substitute(kernel)) # kernel <- do.call(kernel, kpar) if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must ba a matrix or a vector") n <- nrow(x) if(is.null(y)) { ## check if z,x match z <- as.matrix(z) if(is.null(y)&&!dim(z)[1]==n) stop("z columns/length do not match x columns") res1 <- matrix(rep(0,n*n), ncol = n) for(i in 1:n) { for(j in i:n) { res1[j,i] <- kernel(x[i,],x[j,]) } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(y,"matrix")) { m <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1] == m) stop("z has wrong dimension") res1 <- matrix(rep.int(0,m*n),ncol=m) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,]) } } } return(res1%*%z) } setGeneric("kernelMult", function(kernel, x, y=NULL, z, blocksize = 256) standardGeneric("kernelMult")) kernelMult.character <- function(kernel, x, y=NULL, z, blocksize = 256) { return(x%*%z) } setMethod("kernelMult",signature(kernel="character", x="kernelMatrix"),kernelMult.character) kernelMult.rbfkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n))))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n))))%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2))))%*%z 
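## each pass forms one block of RBF kernel rows from the identity
## ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2*x_i.y_j and collapses it
## against z immediately, so K %*% z is computed without ever storing K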
lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z } return(res) } setMethod("kernelMult",signature(kernel="rbfkernel"),kernelMult.rbfkernel) kernelMult.laplacekernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(-sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)),9)))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(-sigma*sqrt(-round(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)),9)))%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(-sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)),9)))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(-sigma*sqrt(-round(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2)),9)))%*%z } return(res) } setMethod("kernelMult",signature(kernel="laplacekernel"),kernelMult.laplacekernel) kernelMult.besselkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma nu <- kpar(kernel)$order ni <- kpar(kernel)$degree n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 lim <- 1/(gamma(nu+1)*2^(nu)) dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize xx <- sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx<10e-5)] <- lim res[lowerl:upperl,] <- ((res1/lim)^ni)%*%z lowerl <- upperl + 1 } } if(lowerl <= n) { xx <- sigma*sqrt(-round(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx<10e-5)] <- lim res[lowerl:n,] <- ((res1/lim)^ni)%*%z } } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize xx <- 
sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx < 10e-5)] <- lim res[lowerl:upperl,] <- ((res1/lim)^ni)%*%z lowerl <- upperl + 1 } } if(lowerl <= n) { xx <- sigma*sqrt(-round(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx < 10e-5)] <- lim res[lowerl:n,] <- ((res1/lim)^ni)%*%z } } return(res) } setMethod("kernelMult",signature(kernel="besselkernel"),kernelMult.besselkernel) kernelMult.anovakernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { a <- matrix(0,m,blocksize) re <- matrix(0, n, blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in 1:n) { a[rep(TRUE,m),rep(TRUE,blocksize)] <- x[j,] re[j,] <- colSums(exp( - sigma*(a - t(x[lowerl:upperl,]))^2))^degree } res[lowerl:upperl,] <- t(re)%*%z lowerl <- upperl + 1 } } if(lowerl <= n){ a <- matrix(0,m,n-lowerl+1) re <- matrix(0,n,n-lowerl+1) for(j in 1:n) { a[rep(TRUE,m),rep(TRUE,n-lowerl+1)] <- x[j,] re[j,] <- colSums(exp( - sigma*(a - t(x[lowerl:n,,drop=FALSE]))^2))^degree } res[lowerl:n,] <- t(re)%*%z } } if(is(y,"matrix")) { n2 <- dim(y)[1] nblocks <- floor(n2/blocksize) z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { b <- matrix(0, m, blocksize) re <- matrix(0, n, blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,blocksize)] <- x[j,] re[j,]<- colSums(exp( - sigma*(b - t(y[lowerl:upperl,]))^2)^degree) } res[,1] <- res[,1] + re %*%z[lowerl:upperl,] lowerl <- upperl + 1 } } if(lowerl <= n) { b <- matrix(0, dim(x)[2], n2-lowerl+1) re <- matrix(0, n, n2-lowerl+1) for( i in 1:n) { b[rep(TRUE,dim(x)[2]),rep(TRUE,n2-lowerl+1)] <- x[i,] re[i,]<- colSums(exp( - sigma*(b - t(y[lowerl:n2,,drop=FALSE]))^2)^degree) } res[,1] <- res[,1] + re%*%z[lowerl:n2] } } return(res) } setMethod("kernelMult",signature(kernel="anovakernel"),kernelMult.anovakernel) kernelMult.splinekernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") n <- dim(x)[1] m <- dim(x)[2] if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) x <- t(x) if(nblocks > 0) { re <- matrix(0, dim(z)[1], blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for (j in lowerl:upperl) { dr <- x + x[ , j] dp <- x * x[ , j] dm <- pmin(x,x[,j]) re[,j-(i-1)*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:upperl,] <- crossprod(re,z) lowerl <- upperl + 1 } } if(lowerl <= n){ a <- 
matrix(0,m,n-lowerl+1) re <- matrix(0,dim(z)[1],n-lowerl+1) for(j in lowerl:(n-lowerl+1)) { dr <- x + x[ , j] dp <- x * x[ , j] dm <- pmin(x,x[,j]) re[,j-nblocks*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:n,] <- crossprod(re,z) } } if(is(y,"matrix")) { n2 <- dim(y)[1] nblocks <- floor(n2/blocksize) z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) x <- t(x) y <- t(y) if(nblocks > 0) { re <- matrix(0, dim(z)[1], blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in lowerl:upperl) { dr <- y + x[ , j] dp <- y * x[ , j] dm <- pmin(y,x[,j]) re[,j-(i-1)*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:upperl] <- crossprod(re, z) lowerl <- upperl + 1 } } if(lowerl <= n) { b <- matrix(0, dim(x)[2], n-lowerl+1) re <- matrix(0, dim(z)[1], n-lowerl+1) for(j in lowerl:(n-lowerl+1)) { dr <- y + x[, j] dp <- y * x[, j] dm <- pmin(y,x[,j]) re[,j-nblocks*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:n] <- crossprod(re, z) } } return(res) } setMethod("kernelMult",signature(kernel="splinekernel"),kernelMult.splinekernel) kernelMult.polykernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) degree <- kpar(kernel)$degree scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- ((scale*x[lowerl:upperl,]%*%t(x) + offset)^degree) %*% z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- ((scale*x[lowerl:n,]%*%t(x) +offset)^degree)%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- ((scale*x[lowerl:upperl,]%*%t(y) + offset)^degree)%*%z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- ((scale*x[lowerl:n,]%*%t(y) + offset)^degree)%*%z } return(res) } setMethod("kernelMult",signature(kernel="polykernel"),kernelMult.polykernel) kernelMult.tanhkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- tanh(scale*x[lowerl:upperl,]%*%t(x) + offset) %*% z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- tanh(scale*x[lowerl:n,]%*%t(x) +offset)%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- 
matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- tanh(scale*x[lowerl:upperl,]%*%t(y) + offset)%*%z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- tanh(scale*x[lowerl:n,]%*%t(y) + offset)%*%z } return(res) } setMethod("kernelMult",signature(kernel="tanhkernel"),kernelMult.tanhkernel) kernelMult.vanillakernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") n <- dim(x)[1] m <- dim(x)[2] if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- t(crossprod(crossprod(x,z),t(x))) } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- t(crossprod(crossprod(y,z),t(x))) } return(res) } setMethod("kernelMult",signature(kernel="vanillakernel"),kernelMult.vanillakernel) kernelMult.stringkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") normalized = kpar(kernel)$normalized n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) resdiag <- rep(0,n) if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) ## y is null if(is.null(y)){ if(normalized == TRUE){ z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to x length") dz <- dim(z)[2] vres <- matrix(0,n,dz) ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- rep(1,n) vres <- res1 %*% z } else{ z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to x length") dz <- dim(z)[2] vres <- matrix(0,n,dz) ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag vres <- res1 %*% z } } if (!is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") resdiag1 <- rep(0,m) dz <- dim(z)[2] vres <- matrix(0,n,dz) for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { for(j in 1:m) { res1[i - (k-1)*blocksize,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { for(j in 1:m) { res1[i - nblocks*blocksize,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } vres[lowerl:n,] <- res1 %*% z } } else { nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") 
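## as in the other kernelMult methods, rows of the string-kernel matrix
## are generated blockwise against all of y and multiplied into z on the
## fly, so the full length(x) x length(y) matrix is never held in memory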
dz <- dim(z)[2] vres <- matrix(0,n,dz) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { for(j in 1:m) { res1[i - (k-1)*blocksize, j] <- kernel(x[[i]],y[[j]]) } } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { for(j in 1:m) { res1[i - nblocks*blocksize,j] <- kernel(x[[i]],y[[j]]) } } vres[lowerl:n,] <- res1 %*% z } } } } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, "spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(sktype==3 &(any(nchar(x) < kpar(kernel)$length)|any(nchar(x) < kpar(kernel)$length))) stop("spectral kernel does not accept strings shorter than the length parameter") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") ## y is null if(is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) for (i in 1:n) resdiag[i] <- .Call(stringtv, as.character(x[i]), as.character(x[i]), as.integer(1), as.integer(nchar(x[i])), as.integer(nchar(x[i])), as.integer(sktype), as.double(kpar(kernel)$lambda)) if (nblocks > 0){ res1 <- matrix(0,blocksize,n) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call(stringtv, as.character(x[i]), as.character(x), as.integer(length(x)), as.integer(nchar(x[i])), as.integer(nchar(x)), as.integer(sktype), as.double(kpar(kernel)$lambda)) / sqrt(resdiag[i]*resdiag) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,n) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call(stringtv, as.character(x[i]), as.character(x), as.integer(length(x)), as.integer(nchar(x[i])), as.integer(nchar(x)), as.integer(sktype), as.double(kpar(kernel)$lambda)) / sqrt(resdiag[i]*resdiag) } vres[lowerl:n,] <- res1 %*% z } } else { nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) if (nblocks > 0){ res1 <- matrix(0,blocksize,n) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call(stringtv, as.character(x[i]), as.character(x), as.integer(length(x)), as.integer(nchar(x[i])), as.integer(nchar(x)), as.integer(sktype), as.double(kpar(kernel)$lambda)) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,n) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call(stringtv, as.character(x[i]), as.character(x), as.integer(length(x)), as.integer(nchar(x[i])), as.integer(nchar(x)), as.integer(sktype), as.double(kpar(kernel)$lambda)) } vres[lowerl:n,] <- res1 %*% z } } } if (!is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") resdiag1 <- rep(0,m) dz <- dim(z)[2] vres <- matrix(0,n,dz) for(i in 1:n) resdiag[i] <- .Call(stringtv, as.character(x[i]), as.character(x[i]), as.integer(1), as.integer(nchar(x[i])), as.integer(nchar(x[i])), as.integer(sktype), as.double(kpar(kernel)$lambda)) for(i in 1:m) resdiag1[i] <- .Call(stringtv, as.character(y[i]), as.character(y[i]), as.integer(1), as.integer(nchar(y[i])), as.integer(nchar(y[i])), as.integer(sktype), 
as.double(kpar(kernel)$lambda)) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call(stringtv, as.character(x[i]), as.character(y), as.integer(length(y)), as.integer(nchar(x[i])), as.integer(nchar(y)), as.integer(sktype), as.double(kpar(kernel)$lambda)) / sqrt(resdiag[i]*resdiag1) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call(stringtv, as.character(x[i]), as.character(y), as.integer(length(y)), as.integer(nchar(x[i])), as.integer(nchar(y)), as.integer(sktype), as.double(kpar(kernel)$lambda)) / sqrt(resdiag[i]*resdiag1) } vres[lowerl:n,] <- res1 %*% z } } else { nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call(stringtv, as.character(x[i]), as.character(y), as.integer(length(y)), as.integer(nchar(x[i])), as.integer(nchar(y)), as.integer(sktype), as.double(kpar(kernel)$lambda)) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call(stringtv, as.character(x[i]), as.character(y), as.integer(length(y)), as.integer(nchar(x[i])), as.integer(nchar(y)), as.integer(sktype), as.double(kpar(kernel)$lambda)) } vres[lowerl:n,] <- res1 %*% z } } } } return(vres) } setMethod("kernelMult",signature(kernel="stringkernel"),kernelMult.stringkernel) ## kernelPol return the quadratic form of a kernel matrix ## kernelPol returns the scalar product of x y componentwise with polarities ## of z and k kernelPol <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must ba a matrix or a vector") n <- nrow(x) z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") res1 <- matrix(rep(0,n*n), ncol = n) if (is.null(y)) { for(i in 1:n) { for(j in i:n) { res1[i,j] <- kernel(x[i,],x[j,])*z[j]*z[i] } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(x,"matrix") && is(y,"matrix")){ m <- dim(y)[1] if(is.null(k)) stop("k not specified!") k <- as.matrix(k) if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") if(!dim(z)[2]==dim(k)[2]) stop("z and k vectors must have the same number of columns") if(!dim(x)[1]==dim(z)[1]) stop("z and x must have the same number of rows") if(!dim(y)[1]==dim(k)[1]) stop("y and k must have the same number of rows") res1 <- matrix(0,dim(x)[1],dim(y)[1]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,])*z[i]*k[j] } } } return(res1) } setGeneric("kernelPol", function(kernel, x, y=NULL, z, k = NULL) standardGeneric("kernelPol")) kernelPol.rbfkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix a vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- 
dim(x)[1] dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) res[i,] <- z[i,]*(exp(2*sigma*(res[i,] - dota - rep(dota[i],n)))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m)#2*sigma or sigma res[,i]<- k[i,]*(exp(2*sigma*(res[,i] - dota - rep(dotb[i],n)))*z) return(res) } } setMethod("kernelPol",signature(kernel="rbfkernel"),kernelPol.rbfkernel) kernelPol.laplacekernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) n <- dim(x)[1] dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) res[i,] <- z[i,]*(exp(-sigma*sqrt(-round(2*(res[i,] - dota - rep(dota[i],n)),9)))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m)#2*sigma or sigma res[,i]<- k[i,]*(exp(-sigma*sqrt(-round(2*(res[,i] - dota - rep(dotb[i],n)),9)))*z) return(res) } } setMethod("kernelPol",signature(kernel="laplacekernel"),kernelPol.laplacekernel) kernelPol.besselkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) nu <- kpar(kernel)$order ni <- kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu + 1)*2^nu) dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) { xx <- sigma*sqrt(-round(2*(res[i,] - dota - rep(dota[i],n)),9)) res[i,] <- besselJ(xx,nu)*(xx^(-nu)) res[i,which(xx < 10e-5)] <- lim res[i,] <- z[i,]*(((res[i,]/lim)^ni)*z) } return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] if(!dim(k)[1]==m) stop("k must have equal rows to y") k <- as.matrix(k) if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){#2*sigma or sigma xx <- sigma*sqrt(-round(2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim res[,i]<- k[i,]*(((res[,i]/lim)^ni)*z) } return(res) } } 
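## Illustrative sketch (not run) of the kernelPol semantics implemented
## above and below: entry (i,j) is k(x_i, y_j) scaled by the polarities
## z[i] and k[j] (typically class labels, as in SVM dual problems); the
## data below are invented for the example:
## kern <- rbfdot(sigma = 0.05)
## X <- matrix(rnorm(40), 10, 4); z <- sample(c(-1, 1), 10, replace = TRUE)
## H <- kernelPol(kern, X, z = z)
## all.equal(H, unclass(kernelMatrix(kern, X)) * (z %o% z))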
setMethod("kernelPol",signature(kernel="besselkernel"),kernelPol.besselkernel) kernelPol.anovakernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) n <- dim(x)[1] z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") a <- matrix(0, dim(x)[2], n) res <- matrix(0,n,n) for (i in 1:n) { a[rep(TRUE,dim(x)[2]), rep(TRUE,n)] <- x[i,] res[i,]<- z[i,]*((colSums(exp( - sigma*(a - t(x))^2))^degree)*z) } return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) for( i in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,m)] <- x[i,] res[i,] <- z[i,]*((colSums(exp( - sigma*(b - t(y))^2))^degree)*k) } return(res) } } setMethod("kernelPol",signature(kernel="anovakernel"),kernelPol.anovakernel) kernelPol.splinekernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree n <- dim(x)[1] z <- as.vector(z) if(!(length(z)==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- kernelMatrix(kernel,x) return(unclass(z*t(res*z))) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal to rows of y") res <- kernelMatrix(kernel,x,y) return(unclass(k*t(res*z))) } } setMethod("kernelPol",signature(kernel="splinekernel"),kernelPol.splinekernel) kernelPol.polykernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) degree <- kpar(kernel)$degree scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(((scale*crossprod(t(x))+offset)^degree)*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal to rows of y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") res<- k*t(((scale*x%*%t(y) + offset)^degree)*z) return(res) } } setMethod("kernelPol",signature(kernel="polykernel"),kernelPol.polykernel) kernelPol.tanhkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) 
stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(tanh(scale*crossprod(t(x))+offset)*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes x, y must have the same number of columns") res<- k*t(tanh(scale*x%*%t(y) + offset)*z) return(res) } } setMethod("kernelPol",signature(kernel="tanhkernel"),kernelPol.tanhkernel) kernelPol.vanillakernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(crossprod(t(x))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!length(k)==m) stop("k must have length equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes x, y must have the same number of columns") for( i in 1:m) res<- k*t(x%*%t(y)*z) return(res) } } setMethod("kernelPol",signature(kernel="vanillakernel"),kernelPol.vanillakernel) kernelPol.stringkernel <- function(kernel, x, y=NULL ,z ,k=NULL) { n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) resdiag <- rep(0,n) if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") normalized = kpar(kernel)$normalized if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) z <- as.matrix(z) ## y is null if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { if(is.null(y)){ if(normalized == TRUE){ ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- (z[i,]*kernel(x[[i]],x[[j]])*z[j,])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- z^2 } else { for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- (z[i,]*kernel(x[[i]],x[[j]])*z[j,]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag * z^2 } } if (!is.null(y)){ if(normalized == TRUE){ m <- length(y) res1 <- matrix(0,n,m) resdiag1 <- rep(0,m) k <- as.matrix(k) for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- (z[i,]*kernel(x[[i]],y[[j]])*k[j,])/sqrt(resdiag[i]*resdiag1[j]) } } } } else{ m <- length(y) res1 <- matrix(0,n,m) k <- as.matrix(k) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- (z[i,]*kernel(x[[i]],y[[j]])*k[j,]) } } } } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, 
"spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(is(x,"list")) x <- unlist(x) if(is(y,"list")) y <- unlist(y) x <- paste(x,"\n",seq="") if(!is.null(y)) y <- paste(y,"\n",seq="") if(is.null(y)) ret <- matrix(0, length(x),length(x)) else ret <- matrix(0,length(x),length(y)) if(is.null(y)){ for( i in 1:length(x)) ret[i,] <- .Call(stringtv, as.character(x[i]), as.character(x), as.integer(length(x)), as.integer(nchar(x[i])), as.integer(nchar(x)), as.integer(sktype), as.double(kpar(kernel)$lambda)) res1 <- k*ret*k } else{ for( i in 1:length(x)) ret[i,] <- .Call(stringtv, as.character(x[i]), as.character(y), as.integer(length(x)), as.integer(nchar(x[i])), as.integer(nchar(y)), as.integer(sktype), as.double(kpar(kernel)$lambda)) res1 <- k*ret*z } if(normalized == TRUE){ if(is.null(y)){ ret <- t((1/sqrt(diag(ret)))*t(ret*(1/sqrt(diag(ret))))) res1 <- k*ret*k } else{ norm1 <- rep(0,length(x)) norm2 <- rep(0,length(y)) for( i in 1:length(x)) norm1[i] <- .Call(stringtv, as.character(x[i]), as.character(x[i]), as.integer(1), as.integer(nchar(x[i])), as.integer(nchar(x[i])), as.integer(sktype), as.double(kpar(kernel)$lambda)) for( i in 1:length(y)) norm2[i] <- .Call(stringtv, as.character(y[i]), as.character(y[i]), as.integer(1), as.integer(nchar(y[i])), as.integer(nchar(y[i])), as.integer(sktype), as.double(kpar(kernel)$lambda)) ret <- t((1/sqrt(norm2))*t(ret*(1/sqrt(norm1)))) res1 <- k*ret*z } } } return(res1) } setMethod("kernelPol",signature(kernel="stringkernel"),kernelPol.stringkernel) ## kernelFast returns the kernel matrix, its usefull in algorithms ## which require iterative kernel matrix computations kernelFast <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setGeneric("kernelFast",function(kernel, x, y, a) standardGeneric("kernelFast")) kernelFast.rbfkernel <- function(kernel, x, y, a) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- a/2 if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(2*sigma*(res[,i] - dota - rep(dotb[i],n))) return(res) } } setMethod("kernelFast",signature(kernel="rbfkernel"),kernelFast.rbfkernel) kernelFast.laplacekernel <- function(kernel, x, y, a) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- a/2 if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(-sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9))) return(res) } } setMethod("kernelFast",signature(kernel="laplacekernel"),kernelFast.laplacekernel) kernelFast.besselkernel <- function(kernel, x, y, a) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma nu = kpar(kernel)$order ni = kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu+1)*2^(nu)) dota <- a/2 if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){ xx <- sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)) 
      res[,i] <- besselJ(xx,nu)*(xx^(-nu))
      res[which(xx<10e-5),i] <- lim
    }
    return((res/lim)^ni)
  }
}
setMethod("kernelFast",signature(kernel="besselkernel"),kernelFast.besselkernel)

## for the remaining kernels no cached quantities help, so kernelFast
## simply falls back to kernelMatrix
kernelFast.anovakernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="anovakernel"),kernelFast.anovakernel)

kernelFast.polykernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="polykernel"),kernelFast.polykernel)

kernelFast.vanilla <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="vanillakernel"),kernelFast.vanilla)

kernelFast.tanhkernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="tanhkernel"),kernelFast.tanhkernel)

kernelFast.stringkernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="stringkernel"),kernelFast.stringkernel)

kernelFast.splinekernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="splinekernel"),kernelFast.splinekernel)
kernlab/R/csi.R0000644000175100001440000003653711304023134013015 0ustar hornikusers
## 15.09.2005 alexandros

setGeneric("csi", function(x, y, kernel="rbfdot",kpar=list(sigma=0.1), rank, centering = TRUE, kappa =0.99 ,delta = 40 ,tol = 1e-4) standardGeneric("csi"))

setMethod("csi",signature(x="matrix"),
function(x, y, kernel="rbfdot",kpar=list(sigma=0.1), rank, centering = TRUE, kappa =0.99 ,delta = 40 ,tol = 1e-5)
{
  ## G,P,Q,R,error1,error2,error,predicted.gain,true.gain
  ## INPUT
  ## x : data
  ## y : target vector n x d
  ## m : maximal rank
  ## kappa : trade-off between approximation of K and prediction of y (suggested: .99)
  ## centering : 1 if centering, 0 otherwise (suggested: 1)
  ## delta : number of columns of cholesky performed in advance (suggested: 40)
  ## tol : minimum gain at iteration (suggested: 1e-4)
  ## OUTPUT
  ## G : Cholesky decomposition -> K(P,P) is approximated by G*G'
  ## P : permutation matrix
  ## Q,R : QR decomposition of G (or center(G) if centering)
  ## error1 : tr(K-G*G')/tr(K) at each step of the decomposition
  ## error2 : ||y-Q*Q'*y||.F^2 / ||y||.F^2 at each step of the decomposition
  ## predicted.gain : predicted gain before adding each column
  ## true.gain : actual gain after adding each column

  n <- dim(x)[1]
  d <- dim(y)[2]
  if(n != dim(y)[1]) stop("Labels y and data x don't match")

  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

  m <- rank
  ## make sure rank is smaller than n
  m <- min(n-2,m)
  G <- matrix(0,n,min(m+delta,n))  ## Cholesky factor
  ## diagonal of K (assumes k(x,x) does not depend on x, as for the rbf kernel)
  diagK <- rep(drop(kernel(x[1,],x[1,])),n)
  P <- 1:n  ## pivots
  Q <- matrix(0,n,min(m+delta,n))  ## Q part of the QR decomposition
  R <- matrix(0,min(m+delta,n),min(m+delta,n))  ## R part of the QR decomposition
  traceK <- sum(diagK)
  lambda <- (1-kappa)/traceK
  if (centering)
    y <- y - (1/n) * t(matrix(colSums(y),d,n))
  sumy2 <- sum(y^2)
  mu <- kappa/sumy2
  error1 <- traceK
  error2 <- sumy2
  predictedgain <- truegain <- rep(0,min(m+delta,n))

  k <- 0     # current index of the Cholesky decomposition
  kadv <- 0  # current index of the look ahead steps
  Dadv <- diagK
  D <- diagK

  ## makes sure that delta is smaller than n - 2
  delta <- min(delta,n - 2)

  ## approximation cost cached quantities
  A1 <- matrix(0,n,1)
  A2 <- matrix(0,n,1)
  A3 <- matrix(0,n,1)
  GTG <- 
matrix(0,m+delta,m+delta) QTy <- matrix(0,m+delta,d) QTyyTQ <- matrix(0,m+delta,m+delta) ## first performs delta steps of Cholesky and QR decomposition if(delta > 0) for (i in 1:delta) { kadv <- kadv + 1 ## select best index diagmax <- Dadv[kadv] jast <- 1 for (j in 1:(n-kadv+1)) { if (Dadv[j+kadv-1] > diagmax/0.99){ diagmax <- Dadv[j+kadv-1] jast <- j } } if (diagmax < 1e-12){ kadv <- kadv - 1 ## all pivots are too close to zero, stops break ## this can only happen if the matrix has rank less than delta } else{ jast <- jast + kadv-1 ## permute indices P[c(kadv,jast)] <- P[c(jast,kadv)] Dadv[c(kadv, jast)] <- Dadv[c(jast, kadv)] D[c(kadv, jast)] <- D[c(jast, kadv)] A1[c(kadv, jast)] <- A1[c(jast, kadv)] G[c(kadv, jast),1:kadv-1] <- G[c(jast,kadv),1:kadv-1] Q[c(kadv, jast),1:kadv-1] <- Q[c(jast, kadv),1:kadv-1] ## compute new Cholesky column G[kadv,kadv] <- Dadv[kadv] G[kadv,kadv] <- sqrt(G[kadv,kadv]) newKcol <- kernelMatrix(kernel, x[P[(kadv+1):n],,drop = FALSE],x[P[kadv],,drop=FALSE]) G[(kadv+1):n,kadv]<- (1/G[kadv,kadv])*(newKcol - G[(kadv+1):n,1:kadv-1,drop=FALSE] %*% t(G[kadv,1:kadv-1,drop=FALSE])) ## update diagonal Dadv[(kadv+1):n] <- Dadv[(kadv+1):n] - G[(kadv+1):n,kadv]^2 Dadv[kadv] <- 0 ## performs QR if (centering) Gcol <- G[,kadv,drop=FALSE] - (1/n) * matrix(sum(G[,kadv]),n,1) else Gcol <- G[,kadv, drop=FALSE] R[1:kadv-1,kadv] <- crossprod(Q[,1:kadv-1, drop=FALSE], Gcol) Q[,kadv] <- Gcol - Q[,1:kadv-1,drop=FALSE] %*% R[1:kadv-1,kadv,drop=FALSE] R[kadv,kadv] <- sqrt(sum(Q[,kadv]^2)) Q[,kadv] <- Q[,kadv]/drop(R[kadv,kadv]) ## update cached quantities if (centering) GTG[1:kadv,kadv] <- crossprod(G[,1:kadv], G[,kadv]) else GTG[1:kadv,kadv] <- crossprod(R[1:kadv,1:kadv], R[1:kadv,kadv]) GTG[kadv,1:kadv] <- t(GTG[1:kadv,kadv]) QTy[kadv,] <- crossprod(Q[,kadv], y[P,,drop = FALSE]) QTyyTQ[kadv,1:kadv] <- QTy[kadv,,drop=FALSE] %*% t(QTy[1:kadv,,drop=FALSE]) QTyyTQ[1:kadv,kadv] <- t(QTyyTQ[kadv,1:kadv]) ## update costs A1[kadv:n] <- A1[kadv:n] + GTG[kadv,kadv] * G[kadv:n,kadv]^2 A1[kadv:n] <- A1[kadv:n] + 2 * G[kadv:n,kadv] *(G[kadv:n,1:kadv-1] %*% GTG[1:kadv-1,kadv,drop=FALSE]) } } ## compute remaining costs for all indices A2 <- rowSums(( G[,1:kadv,drop=FALSE] %*% crossprod(R[1:kadv,1:kadv], QTy[1:kadv,,drop=FALSE]))^2) A3 <- rowSums((G[,1:kadv,drop=FALSE] %*% t(R[1:kadv,1:kadv]))^2) ## start main loop while (k < m){ k <- k +1 ## compute the gains in approximation for all remaining indices dJK <- matrix(0,(n-k+1),1) for (i in 1:(n-k+1)) { kast <- k+i-1 if (D[kast] < 1e-12) dJK[i] <- -1e100 ## this column is already generated by already ## selected columns -> cannot be selected else { dJK[i] <- A1[kast] if (kast > kadv) ## add eta dJK[i] <- dJK[i] + D[kast]^2 - (D[kast] - Dadv[kast])^2 dJK[i] <- dJK[i] / D[kast] } } dJy <- matrix(0,n-k+1,1) if (kadv > k){ for (i in 1:(n-k+1)) { kast <- k+i-1 if (A3[kast] < 1e-12) dJy[i] <- 0 else dJy[i] <- A2[kast] / A3[kast] } } ## select the best column dJ <- lambda * dJK + mu * dJy diagmax <- -1 jast <- 0 for (j in 1:(n-k+1)) { if (D[j+k-1] > 1e-12) if (dJ[j] > diagmax/0.9){ diagmax <- dJ[j] jast <- j } } if (jast==0) { ## no more good indices, exit k <- k-1 break } jast <- jast + k - 1 predictedgain[k] <- diagmax ## performs one cholesky + QR step: ## if new pivot not already selected, use pivot ## otherwise, select new look ahead index that maximize Dadv if (jast > kadv){ newpivot <- jast jast <- kadv + 1 } else{ a <- 1e-12 b <- 0 for (j in 1:(n-kadv)) { if (Dadv[j+kadv] > a/0.99){ a <- Dadv[j+kadv] b <- j+kadv } } if (b==0) newpivot <- 0 else 
newpivot <- b } if (newpivot > 0){ ## performs steps kadv <- kadv + 1 ## permute P[c(kadv, newpivot)] <- P[c(newpivot, kadv)] Dadv[c(kadv, newpivot)] <- Dadv[c(newpivot, kadv)] D[c(kadv, newpivot)] <- D[c(newpivot, kadv)] A1[c(kadv, newpivot)] <- A1[c(newpivot, kadv)] A2[c(kadv, newpivot)] <- A2[c(newpivot, kadv)] A3[c(kadv, newpivot)] <- A3[c(newpivot, kadv)] G[c(kadv, newpivot),1:kadv-1] <- G[c(newpivot, kadv),1:kadv-1] Q[c(kadv, newpivot),1:kadv-1] <- Q[ c(newpivot, kadv),1:kadv-1] ## compute new Cholesky column G[kadv,kadv] <- Dadv[kadv] G[kadv,kadv] <- sqrt(G[kadv,kadv]) newKcol <- kernelMatrix(kernel,x[P[(kadv+1):n],,drop=FALSE],x[P[kadv],,drop=FALSE]) G[(kadv+1):n,kadv] <- 1/G[kadv,kadv]*( newKcol - G[(kadv+1):n,1:kadv-1,drop=FALSE]%*%t(G[kadv,1:kadv-1,drop=FALSE])) ## update diagonal Dadv[(kadv+1):n] <- Dadv[(kadv+1):n] - G[(kadv+1):n,kadv]^2 Dadv[kadv] <- 0 ## performs QR if (centering) Gcol <- G[,kadv,drop=FALSE] - 1/n * matrix(sum(G[,kadv]),n,1 ) else Gcol <- G[,kadv,drop=FALSE] R[1:kadv-1,kadv] <- crossprod(Q[,1:kadv-1], Gcol) Q[,kadv] <- Gcol - Q[,1:kadv-1, drop=FALSE] %*% R[1:kadv-1,kadv, drop=FALSE] R[kadv,kadv] <- sum(abs(Q[,kadv])^2)^(1/2) Q[,kadv] <- Q[,kadv] / drop(R[kadv,kadv]) ## update the cached quantities if (centering) GTG[k:kadv,kadv] <- crossprod(G[,k:kadv], G[,kadv]) else GTG[k:kadv,kadv] <- crossprod(R[1:kadv,k:kadv], R[1:kadv,kadv]) GTG[kadv,k:kadv] <- t(GTG[k:kadv,kadv]) QTy[kadv,] <- crossprod(Q[,kadv], y[P,,drop =FALSE]) QTyyTQ[kadv,k:kadv] <- QTy[kadv,,drop = FALSE] %*% t(QTy[k:kadv,,drop = FALSE]) QTyyTQ[k:kadv,kadv] <- t(QTyyTQ[kadv,k:kadv]) ## update costs A1[kadv:n] <- A1[kadv:n] + GTG[kadv,kadv] * G[kadv:n,kadv]^2 A1[kadv:n] <- A1[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1,drop = FALSE] %*% GTG[k:kadv-1,kadv,drop=FALSE]) A3[kadv:n] <- A3[kadv:n] + G[kadv:n,kadv]^2 * sum(R[k:kadv,kadv]^2) temp <- crossprod(R[k:kadv,kadv,drop = FALSE], R[k:kadv,k:kadv-1,drop = FALSE]) A3[kadv:n] <- A3[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1] %*% t(temp)) temp <- crossprod(R[k:kadv,kadv,drop = FALSE], QTyyTQ[k:kadv,k:kadv,drop = FALSE]) temp1 <- temp %*% R[k:kadv,kadv,drop = FALSE] A2[kadv:n] <- A2[kadv:n] + G[kadv:n,kadv,drop = FALSE]^2 %*% temp1 temp2 <- temp %*% R[k:kadv,k:kadv-1] A2[kadv:n] <- A2[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1,drop=FALSE] %*% t(temp2)) } ## permute pivots in the Cholesky and QR decomposition between p,q p <- k q <- jast if (p < q){ ## store some quantities Gbef <- G[,p:q] Gbeftotal <- G[,k:kadv] GTGbef <- GTG[p:q,p:q] QTyyTQbef <- QTyyTQ[p:q,k:kadv] Rbef <- R[p:q,p:q] Rbeftotal <- R[k:kadv,k:kadv] tempG <- diag(1,q-p+1,q-p+1) tempQ <- diag(1,q-p+1,q-p+1) for (s in seq(q-1,p,-1)) { ## permute indices P[c(s, s+1)] <- P[c(s+1, s)] Dadv[c(s, s+1)] <- Dadv[c(s+1, s)] D[c(s, s+1)] <- D[c(s+1, s)] A1[c(s, s+1)] <- A1[c(s+1, s)] A2[c(s, s+1)] <- A2[c(s+1, s)] A3[c(s, s+1)] <- A3[c(s+1, s)] G[c(s, s+1),1:kadv] <- G[c(s+1,s), 1:kadv] Gbef[c(s, s+1),] <- Gbef[c(s+1, s),] Gbeftotal[c(s, s+1),] <- Gbeftotal[c(s+1, s),] Q[c(s, s+1),1:kadv] <- Q[c(s+1, s) ,1:kadv] ## update decompositions res <- .qr2(t(G[s:(s+1),s:(s+1)])) Q1 <- res$Q R1 <- res$R G[,s:(s+1)] <- G[,s:(s+1)] %*% Q1 G[s,(s+1)] <- 0 R[1:kadv,s:(s+1)] <- R[1:kadv,s:(s+1)] %*% Q1 res <- .qr2(R[s:(s+1),s:(s+1)]) Q2 <- res$Q R2 <- res$R R[s:(s+1),1:kadv] <- crossprod(Q2, R[s:(s+1),1:kadv]) Q[,s:(s+1)] <- Q[,s:(s+1)] %*% Q2 R[s+1,s] <- 0 ## update relevant quantities if( k <= (s-1) && s+2 <= kadv) nonchanged <- c(k:(s-1), (s+2):kadv) if( k <= (s-1) && s+2 > kadv) 
nonchanged <- k:(s-1) if( k > (s-1) && s+2 <= kadv) nonchanged <- (s+2):kadv GTG[nonchanged,s:(s+1)] <- GTG[nonchanged,s:(s+1)] %*% Q1 GTG[s:(s+1),nonchanged] <- t(GTG[nonchanged,s:(s+1)]) GTG[s:(s+1),s:(s+1)] <- crossprod(Q1, GTG[s:(s+1),s:(s+1)] %*% Q1) QTy[s:(s+1),] <- crossprod(Q2, QTy[s:(s+1),]) QTyyTQ[nonchanged,s:(s+1)] <- QTyyTQ[nonchanged,s:(s+1)] %*% Q2 QTyyTQ[s:(s+1),nonchanged] <- t(QTyyTQ[nonchanged,s:(s+1)]) QTyyTQ[s:(s+1),s:(s+1)] <- crossprod(Q2, QTyyTQ[s:(s+1),s:(s+1)] %*% Q2) tempG[,(s-p+1):(s-p+2)] <- tempG[,(s-p+1):(s-p+2)] %*% Q1 tempQ[,(s-p+1):(s-p+2)] <- tempQ[,(s-p+1):(s-p+2)] %*% Q2 } ## update costs tempG <- tempG[,1] tempGG <- GTGbef %*% tempG A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (Gbef[k:n,] %*% tempGG) # between p and q -> different if(k > (p-1) ) kmin <- 0 else kmin <- k:(p-1) if((q+1) > kadv) qmin <- 0 else qmin <- (q+1):kadv A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,kmin,drop=FALSE] %*% GTG[kmin,k,drop=FALSE]) # below p A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,qmin,drop=FALSE] %*% GTG[qmin,k,drop=FALSE]) # above q tempQ <- tempQ[,1] temp <- G[k:n,qmin,drop=FALSE] %*% t(R[k,qmin,drop=FALSE]) temp <- temp + G[k:n,kmin,drop=FALSE] %*% t(R[k,kmin,drop=FALSE]) temp <- temp + Gbef[k:n,] %*% crossprod(Rbef, tempQ) A3[k:n] <- A3[k:n] - temp^2 A2[k:n] <- A2[k:n] + temp^2 * QTyyTQ[k,k] temp2 <- crossprod(tempQ,QTyyTQbef) %*% Rbeftotal A2[k:n] <- A2[k:n] - 2 * temp * (Gbeftotal[k:n,,drop=FALSE] %*% t(temp2)) } else { ## update costs A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,k:kadv,drop=FALSE] %*% GTG[k:kadv,k,drop=FALSE]) A3[k:n]<- A3[k:n] - (G[k:n,k:kadv,drop=FALSE] %*% t(R[k,k:kadv,drop=FALSE]))^2 temp <- G[k:n,k:kadv,drop=FALSE] %*% t(R[k,k:kadv,drop=FALSE]) A2[k:n] <- A2[k:n] + (temp^2) * QTyyTQ[k,k] temp2 <- QTyyTQ[k,k:kadv,drop=FALSE] %*% R[k:kadv,k:kadv,drop=FALSE] A2[k:n] <- A2[k:n] - 2 * temp * (G[k:n,k:kadv,drop=FALSE] %*% t(temp2)) } ## update diagonal and other quantities (A1,B1) D[(k+1):n] <- D[(k+1):n] - G[(k+1):n,k]^2 D[k] <- 0 A1[k:n] <- A1[k:n] + GTG[k,k] * (G[k:n,k]^2) ## compute errors and true gains temp2 <- crossprod(Q[,k], y[P,]) temp2 <- sum(temp2^2) temp1 <- sum(G[,k]^2) truegain[k] <- temp1 * lambda + temp2 * mu error1[k+1] <- error1[k] - temp1 error2[k+1] <- error2[k] - temp2 if (truegain[k] < tol) break } ## reduce dimensions of decomposition G <- G[,1:k,drop=FALSE] Q <- Q[,1:k,drop=FALSE] R <- R[1:k,1:k,drop=FALSE] ## compute and normalize errors error <- lambda * error1 + mu * error2 error1 <- error1 / traceK error2 <- error2 / sumy2 repivot <- sort(P, index.return = TRUE)$ix return(new("csi",.Data=G[repivot, ,drop=FALSE],Q= Q[repivot,,drop = FALSE], R = R, pivots=repivot, diagresidues = error1, maxresiduals = error2, truegain = truegain, predgain = predictedgain)) }) ## I guess we can replace this with qr() .qr2 <- function(M) { ## QR decomposition for 2x2 matrices Q <- matrix(0,2,2) R <- matrix(0,2,2) x <- sqrt(M[1,1]^2 + M[2,1]^2) R[1,1] <- x Q[,1] <- M[,1]/x R[1,2] <- crossprod(Q[,1], M[,2]) Q[,2] <- M[,2] - R[1,2] * Q[,1] R[2,2] <- sum(abs(Q[,2])^2)^(1/2) Q[,2] <- Q[,2] / R[2,2] return(list(Q=Q,R=R)) } kernlab/R/kernelmatrix.R0000644000175100001440000000050311304023134014724 0ustar hornikusers setGeneric("as.kernelMatrix",function(x, center = FALSE) standardGeneric("as.kernelMatrix")) setMethod("as.kernelMatrix", signature(x = "matrix"), function(x, center = FALSE) { if(center){ m <- dim(x)[1] x <- t(t(x - colSums(x)/m) - rowSums(x)/m) + sum(x)/m^2 } return(new("kernelMatrix",.Data = x)) }) 
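## Usage sketch (added for illustration; not part of the original sources,
## and the data and values below are made up): as.kernelMatrix() above wraps
## a user-computed Gram matrix so that the kernelMatrix interfaces of the
## high-level functions can consume it directly.
##
##   x <- matrix(rnorm(50), 10, 5)
##   K <- as.kernelMatrix(tcrossprod(x))   # linear-kernel Gram matrix, class "kernelMatrix"
##   pc <- kpca(K, features = 2)           # dispatches on the kernelMatrix method of kpca()
##   rotated(pc)                           # projections of the 10 points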
kernlab/R/kfa.R0000644000175100001440000001020212676464656013014 0ustar hornikusers ## This code takes the set x of vectors from the input space ## and does projection pursuit to find a good basis for x. ## ## The algorithm is described in Section 14.5 of ## Learning with Kernels by B. Schoelkopf and A. Smola, entitled ## Kernel Feature Analysis. ## ## created : 17.09.04 alexandros ## updated : setGeneric("kfa",function(x, ...) standardGeneric("kfa")) setMethod("kfa", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) Terms <- attr(mf, "terms") na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- kfa(x, ...) ## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kfa") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) setMethod("kfa",signature(x="matrix"), function(x, kernel="rbfdot", kpar=list(sigma=0.1), features = 0, subset = 59, normalize = TRUE, na.action = na.omit) { if(!is.matrix(x)) stop("x must be a matrix") x <- na.action(x) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## initialize variables m <- dim(x)[1] if(subset > m) subset <- m if (features==0) features <- subset alpha <- matrix(0,subset,features) alphazero <- rep(1,subset) alphafeat <- matrix(0,features,features) idx <- -(1:subset) randomindex <- sample(1:m, subset) K <- kernelMatrix(kernel,x[randomindex,,drop=FALSE],x) ## main loop for (i in 1:features) { K.cols <- K[-idx, , drop = FALSE] if(i > 1) projections <- K.cols * (alphazero[-idx]%*%t(rep(1,m))) + crossprod(t(alpha[-idx,1:(i-1),drop=FALSE]),K[idx, ,drop = FALSE]) else projections <- K.cols * (alphazero%*%t(rep(1,m))) Q <- apply(projections, 1, sd) Q.tmp <- rep(0,subset) Q.tmp[-idx] <- Q Qidx <- which.max(Q.tmp) Qmax <- Q.tmp[Qidx] if(i > 1) alphafeat[i,1:(i-1)] <- alpha[Qidx,1:(i-1)] alphafeat[i,i] <- alphazero[Qidx] if (i > 1) idx <- c(idx,Qidx) else idx <- Qidx if (i > 1) Qfeat <- c(Qfeat, Qmax) else Qfeat <- Qmax Ksub <- K[idx, idx, drop = FALSE] alphasub <- alphafeat[i,1:i] phisquare <- alphasub %*% Ksub %*% t(t(alphasub)) dotprod <- (alphazero * (K[,idx, drop = FALSE] %*% t(t(alphasub))) + alpha[,1:i]%*%(Ksub%*%t(t(alphasub))))/drop(phisquare) alpha[,1:i] <- alpha[,1:i] - dotprod %*%alphasub if(normalize){ sumalpha <- alphazero + rowSums(abs(alpha)) alphazero <- alphazero / sumalpha alpha <- alpha/ (sumalpha %*% t(rep(1,features))) } } obj <- new("kfa") alpha(obj) <- alphafeat alphaindex(obj) <- randomindex[idx] xmatrix(obj) <- x[alphaindex(obj),] kernelf(obj) <- kernel kcall(obj) <- match.call() return(obj) }) ## project a new matrix into the feature space setMethod("predict",signature(object="kfa"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x)) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else as.matrix(x) if (!is.matrix(x)) stop("x must be a matrix a vector or a data frame") tmpres <- kernelMult(kernelf(object), x, xmatrix(object), alpha(object)) return(tmpres - 
matrix(colSums(tmpres)/dim(tmpres)[1],dim(tmpres)[1],dim(tmpres)[2],byrow=TRUE)) }) setMethod("show",signature(object="kfa"), function(object) { cat(paste("Number of features :",dim(alpha(object))[2],"\n")) show(kernelf(object)) }) kernlab/R/kpca.R0000644000175100001440000001214412676464735013176 0ustar hornikusers## kpca function ## author : alexandros setGeneric("kpca",function(x, ...) standardGeneric("kpca")) setMethod("kpca", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") Terms <- attr(mf, "terms") x <- model.matrix(mt, mf) res <- kpca(x, ...) ## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kpca") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) ## Matrix Interface setMethod("kpca",signature(x="matrix"), function(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, th = 1e-4, na.action = na.omit, ...) { x <- na.action(x) x <- as.matrix(x) m <- nrow(x) ret <- new("kpca") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") km <- kernelMatrix(kernel,x) ## center kernel matrix kc <- t(t(km - colSums(km)/m) - rowSums(km)/m) + sum(km)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## List Interface setMethod("kpca",signature(x="list"), function(x, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), features = 0, th = 1e-4, na.action = na.omit, ...) { x <- na.action(x) m <- length(x) ret <- new("kpca") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") km <- kernelMatrix(kernel,x) ## center kernel matrix kc <- t(t(km - colSums(km)/m) - rowSums(km)/m) + sum(km)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## Kernel Matrix Interface setMethod("kpca",signature(x= "kernelMatrix"), function(x, features = 0, th = 1e-4, ...) 
{ ret <- new("kpca") m <- dim(x)[1] if(m!= dim(x)[2]) stop("Kernel matrix has to be symetric, and positive semidefinite") ## center kernel matrix kc <- t(t(x - colSums(x)/m) - rowSums(x)/m) + sum(x)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() xmatrix(ret) <- x kernelf(ret) <- " Kernel matrix used." return(ret) }) ## project a new matrix into the feature space setMethod("predict",signature(object="kpca"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x) || !is(x,"list")) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else if (!is(x,"list")) x <- as.matrix(x) if (is.vector(x) || is.data.frame(x)) x <- as.matrix(x) if (!is.matrix(x) && !is(x,"list")) stop("x must be a matrix a vector, a data frame, or a list") if(is(x,"matrix")) { n <- nrow(x) m <- nrow(xmatrix(object))} else { n <- length(x) m <- length(xmatrix(object)) } if(is.character(kernelf(object))) { knc <- x ka <- xmatrix(object) } else { knc <- kernelMatrix(kernelf(object),x,xmatrix(object)) ka <- kernelMatrix(kernelf(object),xmatrix(object)) } ## center ret <- t(t(knc - rowSums(knc)/m) - rowSums(ka)/m) + sum(ka)/(m*n) return(ret %*% pcv(object)) }) kernlab/R/ranking.R0000644000175100001440000002172113561524074013675 0ustar hornikusers## manifold ranking ## author: alexandros setGeneric("ranking",function(x, ...) standardGeneric("ranking")) setMethod("ranking",signature(x="matrix"), function (x, y, kernel = "rbfdot", kpar = list(sigma = 1), scale = FALSE, alpha = 0.99, iterations = 600, edgegraph = FALSE, convergence = FALSE, ...) { m <- dim(x)[1] d <- dim(x)[2] if(length(y) != m) { ym <- matrix(0,m,1) ym[y] <- 1 y <- ym } if (is.null(y)) y <- matrix(1, m, 1) labelled <- y != 0 if (!any(labelled)) stop("no labels sublied") if(is.character(kernel)) kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","besseldot","laplacedot")) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(scale) x <- scale(x) ## scaling from ksvm ## normalize ? 
if (is(kernel)[1]=='rbfkernel' && edgegraph){ sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 sed <- crossprod(t(x)) for (i in 1:n) sed[i,] <- - 2*(sed[i,] - dota - rep(dota[i],n)) diag(sed) <- 0 K <- exp(- sigma * sed) mst <- minimum.spanning.tree(sed) algo.mst <- mst$E max.squared.edge.length <- mst$max.sed.in.tree edgegraph <- (sed <= max.squared.edge.length) K[!edgegraph] <- 0 ##algo.edge.graph <- sparse(algo.edge.graph) rm(sed) gc() } else { if(edgegraph && is(kernel)[1]!="rbfkernel") warning('edge graph is only implemented for use with the RBF kernel') edgegraph <- matrix() K <- kernelMatrix(kernel,x) } diag(K) <- 0 ##K <- sparse(K) cs <- colSums(K) ##cs[cs <= 10e-6] <- 1 D <- 1/sqrt(cs) K <- D * K %*% diag(D) if(sum(labelled)==1) y <- K[, labelled,drop = FALSE] else y <- as.matrix(colSums(K[, labelled])) K <- alpha * K[, !labelled] ym <- matrix(0,m,iterations) ym[,1] <- y for (iteration in 2:iterations) ym[, iteration] <- ym[, 1] + K %*% ym[!labelled, iteration-1] ym[labelled,] <- NA r <- ym r[!labelled,] <- compute.ranks(-r[!labelled, ]) if(convergence) convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled)) else convergence <- matrix() res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations]) return(new("ranking", .Data=res, convergence = convergence, edgegraph = edgegraph)) }) ## kernelMatrix interface setMethod("ranking",signature(x="kernelMatrix"), function (x, y, alpha = 0.99, iterations = 600, convergence = FALSE, ...) { m <- dim(x)[1] if(length(y) != m) { ym <- matrix(0,m,1) ym[y] <- 1 y <- ym } if (is.null(y)) y <- matrix(1, m, 1) labelled <- y != 0 if (!any(labelled)) stop("no labels supplied") diag(x) <- 0 ##K <- sparse(K) cs <- colSums(x) ##cs[cs <= 10e-6] <- 1 D <- 1/sqrt(cs) x <- D * x %*% diag(D) if(sum(labelled)==1) y <- x[, labelled,drop = FALSE] else y <- as.matrix(colSums(x[, labelled])) x <- alpha * x[, !labelled] ym <- matrix(0,m,iterations) ym[,1] <- y for (iteration in 2:iterations) ym[, iteration] <- ym[, 1] + x %*% ym[!labelled, iteration-1] ym[labelled,] <- NA r <- ym r[!labelled,] <- compute.ranks(-r[!labelled, ]) if(convergence) convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled)) else convergence <- matrix() res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations]) return(new("ranking", .Data=res, convergence = convergence)) }) ## list interface setMethod("ranking",signature(x="list"), function (x, y, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 0.99, iterations = 600, convergence = FALSE, ...)
{ m <- length(x) if(length(y) != m) { ym <- matrix(0,m,1) ym[y] <- 1 y <- ym } if (is.null(y)) y <- matrix(1, m, 1) labelled <- y != 0 if (!any(labelled)) stop("no labels supplied") if(is.character(kernel)) kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","besseldot","laplacedot")) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") edgegraph <- matrix() K <- kernelMatrix(kernel,x) diag(K) <- 0 ##K <- sparse(K) cs <- colSums(K) ##cs[cs <= 10e-6] <- 1 D <- 1/sqrt(cs) K <- D * K %*% diag(D) if(sum(labelled)==1) y <- K[, labelled,drop = FALSE] else y <- as.matrix(colSums(K[, labelled])) K <- alpha * K[, !labelled] ym <- matrix(0,m,iterations) ym[,1] <- y for (iteration in 2:iterations) ym[, iteration] <- ym[, 1] + K %*% ym[!labelled, iteration-1] ym[labelled,] <- NA r <- ym r[!labelled,] <- compute.ranks(-r[!labelled, ]) if(convergence) convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled)) else convergence <- matrix() res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations]) return(new("ranking", .Data=res, convergence = convergence, edgegraph = NULL)) }) minimum.spanning.tree <- function(sed) { max.sed.in.tree <- 0 E <- matrix(0,dim(sed)[1],dim(sed)[2]) n <- dim(E)[1] C <- logical(n) cmp <- sed diag(cmp) <- NA ans <- min(cmp, na.rm = TRUE) i <- which.min(cmp) j <- i%/%n + 1 i <- i%%n +1 for (nC in 1:n) { cmp <- sed cmp[C,] <- NA cmp[,!C] <- NA if(nC == 1) { ans <- 1 i <- 1 } else{ ans <- min(cmp, na.rm=TRUE) i <- which.min(cmp)} j <- i%/%n + 1 i <- i%%n + 1 E[i, j] <- nC E[j, i] <- nC C[i] <- TRUE max.sed.in.tree <- max(max.sed.in.tree, sed[i, j]) } ## E <- sparse(E) res <- list(E=E, max.sed.in.tree=max.sed.in.tree) } compute.ranks <- function(am) { rm <- matrix(0,dim(am)[1],dim(am)[2]) for (j in 1:dim(am)[2]) { a <- am[, j] sort <- sort(a, index.return = TRUE) sorted <- sort$x r <- sort$ix r[r] <- 1:length(r) while(1) { if(sum(na.omit(diff(sorted) == 0)) == 0) break tied <- sorted[min(which(diff(sorted) == 0))] sorted[sorted==tied] <- NA r[a==tied] <- mean(r[a==tied]) } rm[, j] <- r } return(rm) } setMethod("show","ranking", function(object) { cat("Ranking object of class \"ranking\"","\n") cat("\n") show(object@.Data) cat("\n") if(!any(is.na(convergence(object)))) cat("convergence matrix included.","\n") if(!any(is.na(edgegraph(object)))) cat("edgegraph matrix included.","\n") }) kernlab/R/kha.R0000644000175100001440000001042612676464711013016 0ustar hornikusers #Kernel Hebbian Algorithm function setGeneric("kha",function(x, ...) standardGeneric("kha")) setMethod("kha", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") Terms <- attr(mf, "terms") x <- model.matrix(mt, mf) res <- kha(x, ...)
## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kha") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) setMethod("kha",signature(x="matrix"), function(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 5, eta = 0.005, th = 1e-4, maxiter = 10000, verbose = FALSE, na.action = na.omit, ...) { x <- na.action(x) x <- as.matrix(x) m <- nrow(x) ret <- new("kha") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Initialize A dual variables A <- matrix(runif(features*m),m,features)*2 - 1 AOld <- A ## compute square norm of data a <- rowSums(x^2) ## initialize the empirical sum kernel map eskm <- rep(0,m) for (i in 1:m) eskm[i] <- sum(kernelFast(kernel,x,x[i,,drop=FALSE], a)) eks <- sum(eskm) counter <- 0 step <- th + 1 Aold <- A while(step > th && counter < maxiter) { y <- rep(0, features) ot <- rep(0,m) ## Hebbian Iteration for (i in 1:m) { ## compute y output etkm <- as.vector(kernelFast(kernel,x,x[i,,drop=FALSE], a)) sum1 <- as.vector(etkm %*% A) sum2 <- as.vector(eskm%*%A)/m asum <- colSums(A) sum3 <- as.vector(eskm[i]*asum)/m sum4 <- as.vector(eks * asum)/m^2 y <- sum1 - sum2 - sum3 + sum4 ## update A yy <- y%*%t(y) yy[upper.tri(yy)] <- 0 tA <- t(A) A <- t(tA - eta * yy%*%tA) A[i,] <- A[i,] + eta * y } if (counter %% 100 == 0 ) { step = mean(abs(Aold - A)) Aold <- A if(verbose) cat("Iteration :", counter, "Converged :", step,"\n") } counter <- counter + 1 } ## Normalize in Feature space cA <- t(A) - colSums(A) Fnorm <- rep(0,features) for (j in 1:m) Fnorm <- Fnorm + colSums(t(cA[,j] * cA) * as.vector(kernelFast(kernel,x,x[j,,drop=FALSE],a))) if(any(Fnorm==0)) { warning("Normalization vector contains zeros, replacing them with ones") Fnorm[which(Fnorm==0)] <- 1 } A <- t(t(A)/sqrt(Fnorm)) pcv(ret) <- A eig(ret) <- Fnorm names(eig(ret)) <- paste("Comp.", 1:features, sep = "") eskm(ret) <- eskm kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## Project a new matrix into the feature space setMethod("predict",signature(object="kha"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x)) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else as.matrix(x) if (is.vector(x)||is.data.frame(x)) x<-as.matrix(x) if (!is.matrix(x)) stop("x must be a matrix a vector or a data frame") n <- nrow(x) m <- nrow(xmatrix(object)) A <- pcv(object) y <- matrix(0,n,dim(A)[2]) eks <- sum(eskm(object)) a <- rowSums(xmatrix(object)^2) ## Project data sum2 <- as.vector(eskm(object)%*%A)/m asum <- colSums(A) sum4 <- as.vector(eks * asum)/m^2 for (i in 1:n) { ## compute y output etkm <- as.vector(kernelFast(kernelf(object),xmatrix(object),x[i,,drop=FALSE], a)) sum1 <- as.vector(etkm %*% A) sum3 <- sum(etkm)*asum/m y[i,] <- sum1 - sum2 - sum3 + sum4 } return(y) }) kernlab/R/onlearn.R0000644000175100001440000001667712560371302013710 0ustar hornikusers## kernel based on-line learning algorithms for classification, novelty detection and regression. 
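##
## Usage sketch (added for illustration; not from the original sources, and
## the data and parameter values below are made up): inlearn() initialises an
## empty on-line learner of a given input dimension, onlearn() updates it one
## example (or batch) at a time, and predict() evaluates the current model.
##
##   on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2),
##                 type = "classification", buffersize = 100)
##   x <- matrix(rnorm(200), 100, 2)
##   y <- sign(x[, 1])
##   for (i in 1:100)                  # feed the stream one example at a time
##     on <- onlearn(on, x[i, ], y[i], nu = 0.2, lambda = 0.1)
##   sign(predict(on, x))              # decision values for the seen data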
## ## created 15.09.04 alexandros ## updated setGeneric("onlearn",function(obj, x, y = NULL, nu = 0.2, lambda = 1e-4) standardGeneric("onlearn")) setMethod("onlearn", signature(obj = "onlearn"), function(obj , x, y = NULL, nu = 0.2, lambda = 1e-4) { if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) buffernotfull <- TRUE else buffernotfull <- FALSE if(is.vector(x)) x <- matrix(x,,length(x)) d <- dim(x)[2] for (i in 1:dim(x)[1]) { xt <- x[i,,drop=FALSE] yt <- y[i] if(type(obj)=="novelty") { phi <- fit(obj) if(phi < 0) { alpha(obj) <- (1-lambda) * alpha(obj) if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%%buffer(obj) +1 } alpha(obj)[onstop(obj)] <- lambda xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(nu-1) } else rho(obj) <- rho(obj) + lambda*nu rho(obj) <- max(rho(obj), 0) if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, matrix(xmatrix(obj)[1:onstop(obj),],ncol=d), matrix(alpha(obj)[1:onstop(obj)],ncol=1)) - rho(obj)) else fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) - rho(obj)) } if(type(obj)=="classification") { if(is.null(pattern(obj)) && is.factor(y)) pattern(obj) <- yt if(!is.null(pattern(obj))) if(pattern(obj) == yt) yt <- 1 else yt <- -1 phi <- fit(obj) alpha(obj) <- (1-lambda) * alpha(obj) if(yt*phi < rho(obj)) { if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%%buffer(obj) +1 } alpha(obj)[onstop(obj)] <- lambda*yt b(obj) <- b(obj) + lambda*yt xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(nu-1) ## (1-nu) ?? } else rho(obj) <- rho(obj) + lambda*nu rho(obj) <- max(rho(obj), 0) if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj)[1:onstop(obj),,drop=FALSE], matrix(alpha(obj)[1:onstop(obj)],ncol=1)) + b(obj)) else fit(obj) <-drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) + b(obj)) } if(type(obj)=="regression") { alpha(obj) <- (1-lambda) * alpha(obj) phi <- fit(obj) if(abs(-phi) < rho(obj)) { if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%% buffer(obj) +1 } alpha(obj)[onstop(obj)] <- sign(yt-phi)*lambda xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(1-nu) ## (1-nu) ?? 
} else{ rho(obj) <- rho(obj) - lambda*nu alpha(obj)[onstop(obj)] <- sign(yt-phi)/rho(obj) } if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, matrix(xmatrix(obj)[1:onstop(obj),],ncol=d), matrix(alpha(obj)[1:onstop(obj)],ncol=1)) + b(obj)) else fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) + b(obj)) } } return(obj) }) setGeneric("inlearn",function(d, kernel = "rbfdot", kpar = list(sigma=0.1), type = "novelty", buffersize = 1000) standardGeneric("inlearn")) setMethod("inlearn", signature(d = "numeric"), function(d ,kernel = "rbfdot", kpar = list(sigma=0.1), type = "novelty", buffersize = 1000) { obj <- new("onlearn") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") type(obj) <- match.arg(type,c("novelty","classification","regression")) xmatrix(obj) <- matrix(0,buffersize,d) kernelf(obj) <- kernel onstart(obj) <- 1 onstop(obj) <- 1 fit(obj) <- 0 b(obj) <- 0 alpha(obj) <- rep(0, buffersize) rho(obj) <- 0 buffer(obj) <- buffersize return(obj) }) setMethod("show","onlearn", function(object){ cat("On-line learning object of class \"onlearn\"","\n") cat("\n") cat(paste("Learning problem :", type(object), "\n")) cat cat(paste("Data dimensions :", dim(xmatrix(object))[2], "\n")) cat(paste("Buffersize :", buffer(object), "\n")) cat("\n") show(kernelf(object)) }) setMethod("predict",signature(object="onlearn"), function(object, x) { if(is.vector(x)) x<- matrix(x,1) d <- dim(xmatrix(object))[2] if(type(object)=="novelty") { if(onstart(object) == 1 && onstop(object) < buffer(object)) res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol= d), matrix(alpha(object)[1:onstop(object)],ncol=1)) - rho(object)) else res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) - rho(object)) } if(type(object)=="classification") { if(onstart(object) == 1 && onstop(object) < buffer(object)) res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) + b(object)) else res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) + b(object)) } if(type(object)=="regression") { if(onstart(object) == 1 && onstop(object) < buffer(object)) res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) + b(object)) else res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) + b(object)) } return(res) }) kernlab/R/kqr.R0000644000175100001440000002445512676464751013063 0ustar hornikuserssetGeneric("kqr", function(x, ...) standardGeneric("kqr")) setMethod("kqr",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... 
<- NULL m$formula <- m$x m$x <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- kqr(x, y, scaled = scaled, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("kqr",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- kqr(x, ...) ret }) setMethod("kqr",signature(x="matrix"), function (x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot", kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6, fit = TRUE, cross = 0, na.action = na.omit) { if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1") ret <- new("kqr") param(ret) <- list(C = C, tau = tau) if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) tmpsc <- NULL x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) tmpsc <- list(scaled = scaled, x.scale = x.scale,y.scale = y.scale) } } ## Arrange all the kernel mumbo jumbo if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot")) if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE,frac=1)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Setup QP problem and call ipop if(!reduced) H = kernelMatrix(kernel,x) else H = csi(x, kernel = kernel, rank = rank) c = -y A = rep(1,m) b = 0 r = 0 l = matrix(C * (tau-1),m,1) u = matrix(C * tau ,m,1) qpsol = ipop(c, H, A, b, l, u, r) alpha(ret)= coef(ret) = primal(qpsol) b(ret) = dual(qpsol)[1] ## Compute training error/loss xmatrix(ret) <- x ymatrix(ret) <- y kernelf(ret) <- kernel kpar(ret) <- kpar type(ret) <- ("Quantile Regression") if (fit){ fitted(ret) <- predict(ret, x) if (!is.null(scaling(ret)$y.scale)) fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau)) } else fitted(ret) <- NULL if(any(scaled)) scaling(ret) <- tmpsc ## Crossvalidation cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { pinloss <- 0 ramloss <- 0 crescs <- NULL suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- kqr(x[cind,],y[cind], tau = tau, C = C, scale = FALSE, kernel = kernel, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) crescs <- c(crescs,cres) } if (!is.null(scaling(ret)$y.scale)){ crescs <- crescs * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" ysvgr <- y[unlist(vgr)] * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" } else ysvgr <- y[unlist(vgr)] pinloss <- drop(pinloss(ysvgr, crescs, tau)) ramloss <- drop(ramploss(ysvgr, crescs, tau)) cross(ret) <- c(pinloss, ramloss) } return(ret) }) setMethod("kqr",signature(x="list"), function (x, y, tau = 0.5, C = 0.1, kernel = "stringdot", kpar = list(length=4, C=0.5), fit = TRUE, cross = 0) { if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") K <- kernelMatrix(kernel,x) ret <- kqr(K,y = y,tau = tau, C = C, fit = fit, cross = cross) kernelf(ret) <- kernel kpar(ret) <- kpar return(ret) }) setMethod("kqr",signature(x="kernelMatrix"), function (x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0) { if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1") ret <- new("kqr") param(ret) <- list(C = C, tau = tau) ncols <- ncol(x) m <- nrows <- nrow(x) y <- as.vector(y) ## Setup QP problem and call ipop H = x c = -y A = rep(1,m) b = 0 r = 0 l = matrix(C * (tau-1),m,1) u = matrix(C * tau ,m,1) qpsol = ipop(c, H, A, b, l, u, r) alpha(ret)= coef(ret) = primal(qpsol) b(ret) = dual(qpsol)[1] ## Compute training error/loss ymatrix(ret) <- y kernelf(ret) <- "Kernel Matrix used."
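## Added commentary (not in the original sources): the ipop() call above
## solves the dual of kernel quantile regression under the pinball loss
## L_tau(u) = tau * u for u >= 0 and (tau - 1) * u for u < 0, with
## u = y - f(x) (see pinloss() below): minimise 0.5 * t(a) %*% H %*% a -
## t(y) %*% a subject to sum(a) == 0 and the box constraints
## C * (tau - 1) <= a_i <= C * tau, the fitted quantile being
## f(x) = sum_i a_i k(x_i, x) - b, as evaluated in the predict() method.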
type(ret) <- ("Quantile Regresion") if (fit){ fitted(ret) <- predict(ret, x) error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau)) } else NA ## Crossvalidation cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { pinloss <- 0 ramloss <- 0 crescs <- NULL suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- kqr(x[cind,cind],y[cind], tau = tau, C = C, scale = FALSE, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],vgr[[i]]]) crescs <- c(crescs,cres) } ysvgr <- y[unlist(vgr)] pinloss <- drop(pinloss(ysvgr, crescs, tau)) ramloss <- drop(ramloss(ysvgr, crescs, tau)) cross(ret) <- c(pinloss, ramloss) } return(ret) }) pinloss <- function(y,f,tau) { if(is.vector(y)) m <- length(y) else m <- dim(y)[1] tmp <- y - f return((tau *sum(tmp*(tmp>=0)) + (tau-1) * sum(tmp * (tmp<0)))/m) } ramploss <- function(y,f,tau) { if(is.vector(y)) m <- length(y) else m <- dim(y)[1] return(sum(y<=f)/m) } setMethod("predict", signature(object = "kqr"), function (object, newdata) { sc <- 0 if (missing(newdata)) if(!is.null(fitted(object))) return(fitted(object)) else stop("newdata is missing and no fitted values found.") if(!is(newdata,"kernelMatrix")){ ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") if (is.list(scaling(object)) && sc != 1) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale" ) predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) - b(object) if (!is.null(scaling(object)$y.scale)) return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center") else return(predres) } else { return(newdata%*%alpha(object) - b(object)) } }) setMethod("show","kqr", function(object){ cat("Kernel Quantile Regression object of class \"kqr\"","\n") cat("\n") show(kernelf(object)) cat("\n") cat("Regularization Cost Parameter C: ",round(param(object)[[1]],9)) cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n")) if(!is.null(fitted(object))) cat(paste("Train error :"," pinball loss : ", round(error(object)[1],9)," rambloss :", round(error(object)[2],9),"\n")) ##train error & loss if(cross(object)!=-1) cat("Cross validation error :", " pinballoss : ", round(cross(object)[1],9)," rambloss :", round(cross(object)[2],9),"\n") }) kernlab/R/gausspr.R0000644000175100001440000003567512676464637013763 0ustar hornikusers## Gaussian Processes implementation. Laplace approximation for classification. ## author : alexandros karatzoglou setGeneric("gausspr", function(x, ...) standardGeneric("gausspr")) setMethod("gausspr",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... 
<- NULL m$formula <- m$x m$x <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- gausspr(x, y, scaled = scaled, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("gausspr",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- gausspr(x, ...) ret }) setMethod("gausspr",signature(x="matrix"), function (x, y, scaled = TRUE, type = NULL, kernel = "rbfdot", kpar = "automatic", var = 1, variance.model = FALSE, tol = 0.0005, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { ## should become an option reduced <- FALSE ## subsetting and na-handling for matrices ret <- new("gausspr") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] if (is.numeric(y)&&(type(ret)!="classification")) { y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) } tmpsc <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale) } } if (var < 10^-3) stop("Noise variance parameter var has to be greater than 10^-3") # in case of classification: transform factors into integers if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot")) if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class 
`kernel'") p <- 0 if (type(ret) == "classification") { indexes <- lapply(1:nclass(ret), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) xd <- matrix(0,(li+lj),dim(x)[2]) xdi <- 1:(li+lj) <= li xd[xdi,rep(TRUE,dim(x)[2])] <- x[indexes[[i]],] xd[xdi == FALSE,rep(TRUE,dim(x)[2])] <- x[indexes[[j]],] if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) yd <- c(rep(1,li),rep(-1,lj)) else yd <- c(rep(-1,li),rep(1,lj)) if(reduced == FALSE){ K <- kernelMatrix(kernel,xd) gradnorm <- 1 alphag <- solut <- rep(0,li+lj) while (gradnorm > tol) { f <- crossprod(K,alphag) grad <- -yd/(1 + exp(yd*f)) hess <- exp(yd*f) hess <- hess / ((1 + hess)^2) ## We use solveiter instead of solve to speed up things ## A <- t(t(K)*as.vector(hess)) ## diag(A) <- diag(A) + 1 ## alphag <- alphag - solve(A,(grad + alphag)) solut <- solveiter(K, hess, (grad + alphag), solut) alphag <- alphag - solut gradnorm <- sqrt(sum((grad + alphag)^2)) } } else if (reduced ==TRUE) { yind <- t(matrix(unique(yd),2,length(yd))) ymat <- matrix(0, length(yd), 2) ymat[yind==yd] <- 1 ##Z <- csi(xd, ymat, kernel = kernel, rank = dim(yd)[1]) ##Z <- Z[sort(pivots(Z),index.return = TRUE)$ix, ,drop=FALSE] Z <- inchol(xd, kernel = kernel) gradnorm <- 1 alphag <- rep(0,li+lj) m1 <- dim(Z)[1] n1 <- dim(Z)[2] Ksub <- diag(rep(1,n1)) while (gradnorm > tol) { f <- drop(Z%*%crossprod(Z,alphag)) f[which(f>20)] <- 20 grad <- -yd/(1 + exp(yd*f)) hess <- exp(yd*f) hess <- as.vector(hess / ((1 + hess)^2)) alphag <- alphag - (- Z %*%solve(Ksub + (t(Z)*hess)%*%Z) %*% (t(Z)*hess))%*%(grad + alphag) + (grad + alphag) gradnorm <- sqrt(sum((grad + alphag)^2)) } } alpha(ret)[[p]] <- alphag alphaindex(ret)[[p]] <- c(indexes[[i]],indexes[[j]]) } } } if (type(ret) == "regression") { K <- kernelMatrix(kernel,x) if(variance.model) { sol <- solve(K + diag(rep(var, length = m))) rm(K) alpha(ret) <- sol%*%y } else alpha(ret) <- solve(K + diag(rep(var, length = m))) %*% y } kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x if(variance.model) sol(ret) <- sol fitted(ret) <- if (fit) predict(ret, x) else NA if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression"){ if (!is.null(scaling(ret)$y.scale)) fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" error(ret) <- drop(crossprod(fitted(ret) - y)/m) } } if(any(scaled)) scaling(ret) <- tmpsc cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- gausspr(x[cind,], y[cind], scaled = FALSE, type=type(ret),kernel=kernel,var = var, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- gausspr(x[cind,],y[cind],type=type(ret),scaled = FALSE, kernel=kernel,var = var,tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) if (!is.null(scaling(ret)$y.scale)) scal <- scaling(ret)$y.scale$"scaled:scale" cerror <- drop((scal^2)*crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("predict", signature(object = "gausspr"), function (object, 
newdata, type = "response", coupler = "minpair") { sc <- 0 type <- match.arg(type,c("response","probabilities","votes", "variance", "sdeviation")) if (missing(newdata) && type!="response") return(fitted(object)) else if(missing(newdata)) { newdata <- xmatrix(object) sc <- 1 } ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") if (is.list(scaling(object)) && sc != 1) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale" ) p <- 0 if(type == "response") { if(type(object)=="classification") { predres <- 1:newnrows votematrix <- matrix(0,nclass(object),nrows) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[p]],],alpha(object)[[p]]) votematrix[i,ret>0] <- votematrix[i,ret>0] + 1 votematrix[j,ret<0] <- votematrix[j,ret<0] + 1 } } predres <- sapply(predres, function(x) which.max(votematrix[,x])) } } if(type == "probabilities") { if(type(object)=="classification") { binprob <- matrix(0, newnrows, nclass(object)*(nclass(object) - 1)/2) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 binprob[,p] <- 1/(1+exp(-kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[p]],],alpha(object)[[p]]))) } } ## multiprob <- sapply(1:newnrows, function(x) couple(binprob[x ,],coupler = coupler)) multiprob <- couple(binprob, coupler = coupler) } } if(type(object) == "regression") { if (type == "variance"||type == "sdeviation") { Ktest <- kernelMatrix(kernelf(object),xmatrix(object), newdata) predres <- diag(kernelMatrix(kernelf(object),newdata) - t(Ktest) %*% sol(object) %*% Ktest) if (type== "sdeviation") predres <- sqrt(predres) if (!is.null(scaling(object)$y.scale)) predres <- predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center" } else { predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) if (!is.null(scaling(object)$y.scale)) predres <- predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center" } } if (is.character(lev(object))) { ##classification & probabilities : return probabilitie matrix if(type == "probabilities") { colnames(multiprob) <- lev(object) return(multiprob) } ##classification & type response: return factors if(type == "response") return(factor (lev(object)[predres], levels = lev(object))) ##classification & votes : return votematrix if(type == "votes") return(votematrix) } else ##else: return raw values return(predres) }) setMethod("show","gausspr", function(object){ cat("Gaussian Processes object of class \"gausspr\"","\n") cat(paste("Problem type:", type(object),"\n")) cat("\n") show(kernelf(object)) cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n")) if(!is.null(fitted(object))) cat(paste("Train error :", round(error(object),9),"\n")) ##train error & loss if(cross(object)!=-1) cat("Cross validation error :",round(cross(object),9),"\n") }) solveiter <- function(B,noiseproc,b,x,itmax = 
50,tol = 10e-4 ,verbose = FALSE) { ## ---------------------------- ## Preconditioned Biconjugate Gradient method ## solves the linear system Ax = b for general A ## ------------------------------------------ ## x : initial guess ## itmax : max # iterations ## iterates while mean(abs(Ax-b)) > tol ## ## Simplified form of Numerical Recipes: linbcg ## ## The preconditioned matrix is set to inv(diag(A)) ## A defined through A <- I + N*B diagA <- matrix(1,dim(B)[1],1) + colSums(B)+ diag(B)*(noiseproc-1) ## diags of A cont <- 0 iter <- 0 r <- .Amul2(x,B,noiseproc) r <- b - r rr <- r znrm <- 1 bnrm <- sqrt(sum((b)^2)) z <- r/diagA err <- sqrt(sum((.Amul2(x,B,noiseproc) - b)^2))/bnrm while (iter <= itmax){ iter <- iter + 1 zm1nrm <- znrm zz <- rr/diagA bknum<- drop(crossprod(z,rr)) if (iter == 1) { p <- z pp <- zz } else { bk <- bknum/bkden p <- bk*p + z pp <- bk*pp + zz } bkden <- bknum z <- .Amul2(p,B,noiseproc) akden <- drop(crossprod(z,pp)) ak <- bknum/akden zz <- .Amul2T(pp,B,noiseproc) x <- x + ak*p r <- r - ak*z rr <- rr - ak*zz z <- r/diagA znrm <- 1 err <- mean(abs(r)) if (err < tol) break } return(x) } ## [gap in the extracted sources: the kernlab/R/inchol.R file header and the setup code of the incomplete Cholesky routine are missing here; its main loop resumes below] while (residue > tol && counter < maxiter ) { ## Aggressively allocate memory if(counter %% BLOCKSIZE == 0) { Tktmp <- matrix(0, m, dim(Tk)[2] + BLOCKSIZE) Tktmp[1:m > 0, 1:(dim(Tk)[2] + BLOCKSIZE) <= dim(Tk)[2]] <- Tk Tk <- Tktmp Ttmp <- matrix(0, dim(T)[1]+BLOCKSIZE, BLOCKSIZE+counter) ind <- 1:(dim(T)[1]+BLOCKSIZE) <= dim(T)[1] ind2 <- 1:(BLOCKSIZE + counter) <= counter Ttmp[ind , ind2] <- T Ttmp[ind == FALSE, ind2 == FALSE] <- diag(1, BLOCKSIZE) T <- Ttmp padded.veck.tmp <- matrix(0,dim(padded.veck)[1]+BLOCKSIZE) padded.veck.tmp[1:(dim(padded.veck)[1]+BLOCKSIZE) <= dim(padded.veck)[1]] <- padded.veck padded.veck <- padded.veck.tmp pivots.tmp <- matrix(0, dim(pivots)[1]+BLOCKSIZE) pivots.tmp[1:(dim(pivots)[1] + BLOCKSIZE)<= dim(pivots)[1]] <- pivots pivots <- pivots.tmp maxresiduals.tmp <- matrix(0,dim(maxresiduals)[1]+BLOCKSIZE) maxresiduals.tmp[1:(dim(maxresiduals)[1]+BLOCKSIZE) <= dim(maxresiduals)[1]] <- maxresiduals maxresiduals <- maxresiduals.tmp if(counter == 0) t <- rep(0,BLOCKSIZE) else t <- rep(0,length(t)+BLOCKSIZE) } veck <- kernelFast(kernel, x, x[index, ,drop=FALSE],dota) if (counter == 0) { ## No need to compute t here tau <- sqrt(veck[index]) ## Update T T[1, 1] <- tau ## Compute the update for Tk update <- veck/tau } else { padded.veck[1:counter] <- veck[pivots[1:counter]] ## First compute t ## t <- t(crossprod(padded.veck,backsolve(T,diag(1,nrow=dim(T)[1])))) ## cat("T: ",dim(T), " p:",length(padded.veck),",\n") t[1:counter] <- backsolve(T, k=counter, padded.veck, transpose = TRUE) ## Now compute tau tau <- as.vector(sqrt(veck[index] - crossprod(t))) ## Update T T[1:counter, counter+1] <- t[1:counter] T[counter + 1, counter + 1] <- tau ## Compute the update for Tk update <- (1/tau) * (veck - Tk %*% t) } ## Update Tk Tk[,counter + 1] <- update ## Update diagonal residuals diag.residues <- diag.residues - update^2 ## Update pivots pivots[counter + 1] <- index ## Monitor residuals maxresiduals[counter + 1] <- residue ## Choose next candidate residue <- max( diag.residues ) index <- which.max(diag.residues) ## Update counter counter <- counter + 1 ## Report progress to the user if(counter%%blocksize == 0 && (verbose == TRUE)) cat("counter = ",counter," ", "residue = ", residue, "\n") } ## Throw away extra columns which we might have added Tk <- Tk[, 1:counter] pivots <- pivots[1:counter] maxresiduals <- maxresiduals[1:counter] return(new("inchol",.Data=Tk,pivots=pivots,diagresidues = diag.residues, maxresiduals =
maxresiduals)) }) kernlab/R/aobjects.R0000644000175100001440000010724112055335057014036 0ustar hornikusers## S4 object definitions and assigment/accessor functions for the slots. ## ## created 10.09.03 alexandros karatzoglou ## updated 23.08.05 setClass("kernel",representation("function",kpar="list")) setClass("kernelMatrix",representation("matrix"),prototype=structure(.Data=matrix())) setClassUnion("listI", c("list","numeric","vector","integer","matrix")) setClassUnion("output", c("matrix","factor","vector","logical","numeric","list","integer","NULL")) setClassUnion("input", c("matrix","list")) setClassUnion("kfunction", c("function","character")) setClassUnion("mpinput", c("matrix","data.frame","missing")) setClassUnion("lpinput", c("list","missing")) setClassUnion("kpinput", c("kernelMatrix","missing")) setClass("vm", representation(alpha = "listI", ## since setClassUnion is not working type = "character", kernelf = "kfunction", kpar = "list", xmatrix = "input", ymatrix = "output", fitted = "output", lev = "vector", nclass = "numeric", error = "vector", cross = "vector", n.action= "ANY", terms = "ANY", kcall = "call"), contains= "VIRTUAL") #Generic Vector Machine object if(!isGeneric("type")){ if (is.function("type")) fun <- type else fun <- function(object) standardGeneric("type") setGeneric("type", fun) } setMethod("type", "vm", function(object) object@type) setGeneric("type<-", function(x, value) standardGeneric("type<-")) setReplaceMethod("type", "vm", function(x, value) { x@type <- value x }) if(!isGeneric("kernelf")){ if (is.function("kernelf")) fun <- kernelf else fun <- function(object) standardGeneric("kernelf") setGeneric("kernelf", fun) } setMethod("kernelf", "vm", function(object) object@kernelf) setGeneric("kernelf<-", function(x, value) standardGeneric("kernelf<-")) setReplaceMethod("kernelf", "vm", function(x, value) { x@kernelf <- value x }) if(!isGeneric("kpar")){ if (is.function("kpar")) fun <- kpar else fun <- function(object) standardGeneric("kpar") setGeneric("kpar", fun) } setMethod("kpar", "vm", function(object) object@kpar) setGeneric("kpar<-", function(x, value) standardGeneric("kpar<-")) setReplaceMethod("kpar", "vm", function(x, value) { x@kpar <- value x }) if(!isGeneric("kcall")){ if (is.function("kcall")) fun <- kcall else fun <- function(object) standardGeneric("kcall") setGeneric("kcall", fun) } setMethod("kcall", "vm", function(object) object@kcall) setGeneric("kcall<-", function(x, value) standardGeneric("kcall<-")) setReplaceMethod("kcall", "vm", function(x, value) { x@kcall <- value x }) setMethod("terms", "vm", function(x, ...) x@terms) setGeneric("terms<-", function(x, value) standardGeneric("terms<-")) setReplaceMethod("terms", "vm", function(x, value) { x@terms <- value x }) if(!isGeneric("xmatrix")){ if (is.function("xmatrix")) fun <- xmatrix else fun <- function(object) standardGeneric("xmatrix") setGeneric("xmatrix", fun) } setMethod("xmatrix", "vm", function(object) object@xmatrix) setGeneric("xmatrix<-", function(x, value) standardGeneric("xmatrix<-")) setReplaceMethod("xmatrix", "vm", function(x, value) { x@xmatrix <- value x }) if(!isGeneric("ymatrix")){ if (is.function("ymatrix")) fun <- ymatrix else fun <- function(object) standardGeneric("ymatrix") setGeneric("ymatrix", fun) } setMethod("ymatrix", "vm", function(object) object@ymatrix) setGeneric("ymatrix<-", function(x, value) standardGeneric("ymatrix<-")) setReplaceMethod("ymatrix", "vm", function(x, value) { x@ymatrix <- value x }) setMethod("fitted", "vm", function(object, ...) 
object@fitted) setGeneric("fitted<-", function(x, value) standardGeneric("fitted<-")) setReplaceMethod("fitted", "vm", function(x, value) { x@fitted <- value x }) if(!isGeneric("lev")){ if (is.function("lev")) fun <- lev else fun <- function(object) standardGeneric("lev") setGeneric("lev", fun) } setMethod("lev", "vm", function(object) object@lev) setGeneric("lev<-", function(x, value) standardGeneric("lev<-")) setReplaceMethod("lev", "vm", function(x, value) { x@lev <- value x }) if(!isGeneric("nclass")){ if (is.function("nclass")) fun <- nclass else fun <- function(object) standardGeneric("nclass") setGeneric("nclass", fun) } setMethod("nclass", "vm", function(object) object@nclass) setGeneric("nclass<-", function(x, value) standardGeneric("nclass<-")) setReplaceMethod("nclass", "vm", function(x, value) { x@nclass <- value x }) if(!isGeneric("alpha")){ if (is.function("alpha")) fun <- alpha else fun <- function(object) standardGeneric("alpha") setGeneric("alpha", fun) } setMethod("alpha", "vm", function(object) object@alpha) setGeneric("alpha<-", function(x, value) standardGeneric("alpha<-")) setReplaceMethod("alpha", "vm", function(x, value) { x@alpha <- value x }) if(!isGeneric("error")){ if (is.function("error")) fun <- error else fun <- function(object) standardGeneric("error") setGeneric("error", fun) } setMethod("error", "vm", function(object) object@error) setGeneric("error<-", function(x, value) standardGeneric("error<-")) setReplaceMethod("error", "vm", function(x, value) { x@error <- value x }) if(!isGeneric("cross")){ if (is.function("cross")) fun <- cross else fun <- function(object) standardGeneric("cross") setGeneric("cross", fun) } setMethod("cross", "vm", function(object) object@cross) setGeneric("cross<-", function(x, value) standardGeneric("cross<-")) setReplaceMethod("cross", "vm", function(x, value) { x@cross <- value x }) if(!isGeneric("n.action")){ if (is.function("n.action")) fun <- n.action else fun <- function(object) standardGeneric("n.action") setGeneric("n.action", fun) } setMethod("n.action", "vm", function(object) object@n.action) setGeneric("n.action<-", function(x, value) standardGeneric("n.action<-")) setReplaceMethod("n.action", "vm", function(x, value) { x@n.action <- value x }) setClass("ksvm", representation(param = "list", scaling = "ANY", coef = "ANY", alphaindex = "ANY", b = "numeric", obj = "vector", SVindex = "vector", nSV = "numeric", prior = "list", prob.model = "list" ), contains="vm") if(!isGeneric("param")){ if (is.function("param")) fun <- param else fun <- function(object) standardGeneric("param") setGeneric("param", fun) } setMethod("param", "ksvm", function(object) object@param) setGeneric("param<-", function(x, value) standardGeneric("param<-")) setReplaceMethod("param", "ksvm", function(x, value) { x@param <- value x }) if(!isGeneric("scaling")){ if (is.function("scaling")) fun <- scaling else fun <- function(object) standardGeneric("scaling") setGeneric("scaling", fun) } setMethod("scaling", "ksvm", function(object) object@scaling) setGeneric("scaling<-", function(x, value) standardGeneric("scaling<-")) setReplaceMethod("scaling", "ksvm", function(x, value) { x@scaling<- value x }) if(!isGeneric("obj")){ if (is.function("obj")) fun <- obj else fun <- function(object) standardGeneric("obj") setGeneric("obj", fun) } setMethod("obj", "ksvm", function(object) object@obj) setGeneric("obj<-", function(x, value) standardGeneric("obj<-")) setReplaceMethod("obj", "ksvm", function(x, value) { x@obj<- value x }) setMethod("coef", "ksvm", 
function(object, ...) object@coef) setGeneric("coef<-", function(x, value) standardGeneric("coef<-")) setReplaceMethod("coef", "ksvm", function(x, value) { x@coef <- value x }) if(!isGeneric("alphaindex")){ if (is.function("alphaindex")) fun <- alphaindex else fun <- function(object) standardGeneric("alphaindex") setGeneric("alphaindex", fun) } setMethod("alphaindex", "ksvm", function(object) object@alphaindex) setGeneric("alphaindex<-", function(x, value) standardGeneric("alphaindex<-")) setReplaceMethod("alphaindex", "ksvm", function(x, value) { x@alphaindex <- value x }) if(!isGeneric("b")){ if (is.function("b")) fun <- b else fun <- function(object) standardGeneric("b") setGeneric("b", fun) } setMethod("b", "ksvm", function(object) object@b) setGeneric("b<-", function(x, value) standardGeneric("b<-")) setReplaceMethod("b", "ksvm", function(x, value) { x@b <- value x }) if(!isGeneric("SVindex")){ if (is.function("SVindex")) fun <- SVindex else fun <- function(object) standardGeneric("SVindex") setGeneric("SVindex", fun) } setMethod("SVindex", "ksvm", function(object) object@SVindex) setGeneric("SVindex<-", function(x, value) standardGeneric("SVindex<-")) setReplaceMethod("SVindex", "ksvm", function(x, value) { x@SVindex <- value x }) if(!isGeneric("nSV")){ if (is.function("nSV")) fun <- nSV else fun <- function(object) standardGeneric("nSV") setGeneric("nSV", fun) } setMethod("nSV", "ksvm", function(object) object@nSV) setGeneric("nSV<-", function(x, value) standardGeneric("nSV<-")) setReplaceMethod("nSV", "ksvm", function(x, value) { x@nSV <- value x }) if(!isGeneric("prior")){ if (is.function("prior")) fun <- prior else fun <- function(object) standardGeneric("prior") setGeneric("prior", fun) } setMethod("prior", "ksvm", function(object) object@prior) setGeneric("prior<-", function(x, value) standardGeneric("prior<-")) setReplaceMethod("prior", "ksvm", function(x, value) { x@prior <- value x }) if(!isGeneric("prob.model")){ if (is.function("prob.model")) fun <- prob.model else fun <- function(object) standardGeneric("prob.model") setGeneric("prob.model", fun) } setMethod("prob.model", "ksvm", function(object) object@prob.model) setGeneric("prob.model<-", function(x, value) standardGeneric("prob.model<-")) setReplaceMethod("prob.model", "ksvm", function(x, value) { x@prob.model <- value x }) setClass("lssvm", representation(param = "list", scaling = "ANY", coef = "ANY", alphaindex = "ANY", ## prob.model = "list", b = "numeric", nSV = "numeric" ), contains="vm") ##setMethod("prob.model", "lssvm", function(object) object@prob.model) ##setGeneric("prob.model<-", function(x, value) standardGeneric("prob.model<-")) ##setReplaceMethod("prob.model", "lssvm", function(x, value) { ## x@prob.model <- value ## x ##}) setMethod("param", "lssvm", function(object) object@param) setReplaceMethod("param", "lssvm", function(x, value) { x@param <- value x }) setMethod("scaling", "lssvm", function(object) object@scaling) setReplaceMethod("scaling", "lssvm", function(x, value) { x@scaling<- value x }) setMethod("coef", "lssvm", function(object, ...) 
object@coef) setReplaceMethod("coef", "lssvm", function(x, value) { x@coef <- value x }) setMethod("alphaindex", "lssvm", function(object) object@alphaindex) setReplaceMethod("alphaindex", "lssvm", function(x, value) { x@alphaindex <- value x }) setMethod("b", "lssvm", function(object) object@b) setReplaceMethod("b", "lssvm", function(x, value) { x@b <- value x }) setMethod("nSV", "lssvm", function(object) object@nSV) setReplaceMethod("nSV", "lssvm", function(x, value) { x@nSV <- value x }) setClass("kqr", representation(param = "list", scaling = "ANY", coef = "ANY", b = "numeric" ), contains="vm") setMethod("b", "kqr", function(object) object@b) setReplaceMethod("b", "kqr", function(x, value) { x@b <- value x }) setMethod("scaling", "kqr", function(object) object@scaling) setReplaceMethod("scaling", "kqr", function(x, value) { x@scaling <- value x }) setMethod("coef", "kqr", function(object) object@coef) setReplaceMethod("coef", "kqr", function(x, value) { x@coef <- value x }) setMethod("param", "kqr", function(object) object@param) setReplaceMethod("param", "kqr", function(x, value) { x@param <- value x }) ## failed attempt to get rid of all this above ## mkaccesfun <- function(cls) #{ # snames <- slotNames(cls) ## # # for(i in 1:length(snames)) # { resF <- paste("\"",snames[i],"\"",sep="") # if(!isGeneric(snames[i])) # eval(parse(file="",text=paste("setGeneric(",resF,",function(object)","standardGeneric(",resF,")",")",sep=" "))) # setGeneric(snames[i], function(object) standardGeneric(snames[i])) # # setMethod(snames[i], cls, function(object) eval(parse(file="",text=paste("object@",snames[i],sep="")))) # resG <- paste("\"",snames[i],"<-","\"",sep="") #eval(parse(file="",text=paste("setGeneric(",resG,",function(x, value)","standardGeneric(",resG,")",")",sep=" "))) # setReplaceMethod(snames[i], cls, function(x, value) { # eval(parse(file="",text=paste("x@",snames[i],"<-value",sep=""))) # x # }) # } #} setClass("prc", representation(pcv = "matrix", eig = "vector", kernelf = "kfunction", kpar = "list", xmatrix = "input", kcall = "ANY", terms = "ANY", n.action = "ANY"),contains="VIRTUAL") #accessor functions if(!isGeneric("pcv")){ if (is.function("pcv")) fun <- pcv else fun <- function(object) standardGeneric("pcv") setGeneric("pcv", fun) } setMethod("pcv", "prc", function(object) object@pcv) setGeneric("pcv<-", function(x, value) standardGeneric("pcv<-")) setReplaceMethod("pcv", "prc", function(x, value) { x@pcv <- value x }) if(!isGeneric("eig")){ if (is.function("eig")) fun <- eig else fun <- function(object) standardGeneric("eig") setGeneric("eig", fun) } setMethod("eig", "prc", function(object) object@eig) setGeneric("eig<-", function(x, value) standardGeneric("eig<-")) setReplaceMethod("eig", "prc", function(x, value) { x@eig <- value x }) setMethod("kernelf","prc", function(object) object@kernelf) setReplaceMethod("kernelf","prc", function(x, value){ x@kernelf <- value x }) setMethod("xmatrix","prc", function(object) object@xmatrix) setReplaceMethod("xmatrix","prc", function(x, value){ x@xmatrix <- value x }) setMethod("kcall","prc", function(object) object@kcall) setReplaceMethod("kcall","prc", function(x, value){ x@kcall <- value x }) setMethod("terms","prc", function(x, ...) 
x@terms) setReplaceMethod("terms","prc", function(x, value){ x@terms <- value x }) setMethod("n.action","prc", function(object) object@n.action) setReplaceMethod("n.action","prc", function(x, value){ x@n.action <- value x }) ##kernel principal components object setClass("kpca", representation(rotated = "matrix"),contains="prc") #accessor functions if(!isGeneric("rotated")){ if (is.function("rotated")) fun <- rotated else fun <- function(object) standardGeneric("rotated") setGeneric("rotated", fun) } setMethod("rotated", "kpca", function(object) object@rotated) setGeneric("rotated<-", function(x, value) standardGeneric("rotated<-")) setReplaceMethod("rotated", "kpca", function(x, value) { x@rotated <- value x }) ## kernel maximum mean discrepancy setClass("kmmd", representation(H0="logical", AsympH0 ="logical", kernelf = "kfunction", Asymbound="numeric", Radbound="numeric", xmatrix="input", mmdstats="vector")) if(!isGeneric("mmdstats")){ if (is.function("mmdstats")) fun <- mmdstats else fun <- function(object) standardGeneric("mmdstats") setGeneric("mmdstats", fun) } setMethod("mmdstats","kmmd", function(object) object@mmdstats) setGeneric("mmdstats<-", function(x, value) standardGeneric("mmdstats<-")) setReplaceMethod("mmdstats","kmmd", function(x, value){ x@mmdstats <- value x }) if(!isGeneric("Radbound")){ if (is.function("Radbound")) fun <- Radbound else fun <- function(object) standardGeneric("Radbound") setGeneric("Radbound", fun) } setMethod("Radbound","kmmd", function(object) object@Radbound) setGeneric("Radbound<-", function(x, value) standardGeneric("Radbound<-")) setReplaceMethod("Radbound","kmmd", function(x, value){ x@Radbound <- value x }) if(!isGeneric("Asymbound")){ if (is.function("Asymbound")) fun <- Asymbound else fun <- function(object) standardGeneric("Asymbound") setGeneric("Asymbound", fun) } setMethod("Asymbound","kmmd", function(object) object@Asymbound) setGeneric("Asymbound<-", function(x, value) standardGeneric("Asymbound<-")) setReplaceMethod("Asymbound","kmmd", function(x, value){ x@Asymbound <- value x }) if(!isGeneric("H0")){ if (is.function("H0")) fun <- H0 else fun <- function(object) standardGeneric("H0") setGeneric("H0", fun) } setMethod("H0","kmmd", function(object) object@H0) setGeneric("H0<-", function(x, value) standardGeneric("H0<-")) setReplaceMethod("H0","kmmd", function(x, value){ x@H0 <- value x }) if(!isGeneric("AsympH0")){ if (is.function("AsympH0")) fun <- AsympH0 else fun <- function(object) standardGeneric("AsympH0") setGeneric("AsympH0", fun) } setMethod("AsympH0","kmmd", function(object) object@AsympH0) setGeneric("AsympH0<-", function(x, value) standardGeneric("AsympH0<-")) setReplaceMethod("AsympH0","kmmd", function(x, value){ x@AsympH0 <- value x }) setMethod("kernelf","kmmd", function(object) object@kernelf) setReplaceMethod("kernelf","kmmd", function(x, value){ x@kernelf <- value x }) setClass("ipop", representation(primal = "vector", dual = "numeric", how = "character" )) if(!isGeneric("primal")){ if (is.function("primal")) fun <- primal else fun <- function(object) standardGeneric("primal") setGeneric("primal", fun) } setMethod("primal", "ipop", function(object) object@primal) setGeneric("primal<-", function(x, value) standardGeneric("primal<-")) setReplaceMethod("primal", "ipop", function(x, value) { x@primal <- value x }) if(!isGeneric("dual")){ if (is.function("dual")) fun <- dual else fun <- function(object) standardGeneric("dual") setGeneric("dual", fun) } setMethod("dual", "ipop", function(object) object@dual) 
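## Illustrative sketch (not run) of the "ipop" accessors defined here, on a
## hypothetical small quadratic program; the argument values are made up:
##   qp <- ipop(c = rep(-1, 4), H = diag(4), A = t(rep(1, 4)), b = 0,
##              l = rep(0, 4), u = rep(1, 4), r = 1)
##   primal(qp)   # primal solution vector
##   dual(qp)     # dual solution
##   how(qp)      # termination message, e.g. "converged"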
setGeneric("dual<-", function(x, value) standardGeneric("dual<-")) setReplaceMethod("dual", "ipop", function(x, value) { x@dual <- value x }) if(!isGeneric("how")){ if (is.function("how")) fun <- how else fun <- function(object) standardGeneric("how") setGeneric("how", fun) } setMethod("how", "ipop", function(object) object@how) setGeneric("how<-", function(x, value) standardGeneric("how<-")) setReplaceMethod("how", "ipop", function(x, value) { x@how <- value x }) # Kernel Canonical Correlation Analysis setClass("kcca", representation(kcor = "vector", xcoef = "matrix", ycoef = "matrix" ##xvar = "matrix", ##yvar = "matrix" )) if(!isGeneric("kcor")){ if (is.function("kcor")) fun <- kcor else fun <- function(object) standardGeneric("kcor") setGeneric("kcor", fun) } setMethod("kcor", "kcca", function(object) object@kcor) setGeneric("kcor<-", function(x, value) standardGeneric("kcor<-")) setReplaceMethod("kcor", "kcca", function(x, value) { x@kcor <- value x }) if(!isGeneric("xcoef")){ if (is.function("xcoef")) fun <- xcoef else fun <- function(object) standardGeneric("xcoef") setGeneric("xcoef", fun) } setMethod("xcoef", "kcca", function(object) object@xcoef) setGeneric("xcoef<-", function(x, value) standardGeneric("xcoef<-")) setReplaceMethod("xcoef", "kcca", function(x, value) { x@xcoef <- value x }) if(!isGeneric("ycoef")){ if (is.function("ycoef")) fun <- ycoef else fun <- function(object) standardGeneric("ycoef") setGeneric("ycoef", fun) } setMethod("ycoef", "kcca", function(object) object@ycoef) setGeneric("ycoef<-", function(x, value) standardGeneric("ycoef<-")) setReplaceMethod("ycoef", "kcca", function(x, value) { x@ycoef <- value x }) ##if(!isGeneric("xvar")){ ## if (is.function("xvar")) ## fun <- xvar ## else fun <- function(object) standardGeneric("xvar") ## setGeneric("xvar", fun) ##} ##setMethod("xvar", "kcca", function(object) object@xvar) ##setGeneric("xvar<-", function(x, value) standardGeneric("xvar<-")) ##setReplaceMethod("xvar", "kcca", function(x, value) { ## x@xvar <- value ## x ##}) ##if(!isGeneric("yvar")){ ## if (is.function("yvar")) ## fun <- yvar ## else fun <- function(object) standardGeneric("yvar") ## setGeneric("yvar", fun) ##} ##setMethod("yvar", "kcca", function(object) object@yvar) ##setGeneric("yvar<-", function(x, value) standardGeneric("yvar<-")) ##setReplaceMethod("yvar", "kcca", function(x, value) { ## x@yvar <- value ## x ##}) ## Gaussian Processes object setClass("gausspr",representation(tol = "numeric", scaling = "ANY", sol = "matrix", alphaindex="list", nvar = "numeric" ),contains="vm") setMethod("alphaindex","gausspr", function(object) object@alphaindex) setReplaceMethod("alphaindex","gausspr", function(x, value){ x@alphaindex <- value x }) if(!isGeneric("sol")){ if (is.function("sol")) fun <- sol else fun <- function(object) standardGeneric("sol") setGeneric("sol", fun) } setMethod("sol","gausspr", function(object) object@sol) setGeneric("sol<-", function(x, value) standardGeneric("sol<-")) setReplaceMethod("sol","gausspr", function(x, value){ x@sol <- value x }) setMethod("scaling","gausspr", function(object) object@scaling) setReplaceMethod("scaling","gausspr", function(x, value){ x@scaling <- value x }) setMethod("coef", "gausspr", function(object, ...) 
object@alpha) # Relevance Vector Machine object setClass("rvm", representation(tol = "numeric", nvar = "numeric", mlike = "numeric", RVindex = "vector", coef = "ANY", nRV = "numeric"),contains ="vm") if(!isGeneric("tol")){ if (is.function("tol")) fun <- tol else fun <- function(object) standardGeneric("tol") setGeneric("tol", fun) } setMethod("tol", "rvm", function(object) object@tol) setGeneric("tol<-", function(x, value) standardGeneric("tol<-")) setReplaceMethod("tol", "rvm", function(x, value) { x@tol <- value x }) setMethod("coef", "rvm", function(object, ...) object@coef) setReplaceMethod("coef", "rvm", function(x, value) { x@coef <- value x }) if(!isGeneric("RVindex")){ if (is.function("RVindex")) fun <- RVindex else fun <- function(object) standardGeneric("RVindex") setGeneric("RVindex", fun) } setMethod("RVindex", "rvm", function(object) object@RVindex) setGeneric("RVindex<-", function(x, value) standardGeneric("RVindex<-")) setReplaceMethod("RVindex", "rvm", function(x, value) { x@RVindex <- value x }) if(!isGeneric("nvar")){ if (is.function("nvar")) fun <- nvar else fun <- function(object) standardGeneric("nvar") setGeneric("nvar", fun) } setMethod("nvar", "rvm", function(object) object@nvar) setGeneric("nvar<-", function(x, value) standardGeneric("nvar<-")) setReplaceMethod("nvar", "rvm", function(x, value) { x@nvar <- value x }) if(!isGeneric("nRV")){ if (is.function("nRV")) fun <- nRV else fun <- function(object) standardGeneric("nRV") setGeneric("nRV", fun) } setMethod("nRV", "rvm", function(object) object@nRV) setGeneric("nRV<-", function(x, value) standardGeneric("nRV<-")) setReplaceMethod("nRV", "rvm", function(x, value) { x@nRV <- value x }) setMethod("coef", "rvm", function(object, ...) object@alpha) if(!isGeneric("mlike")){ if (is.function("mlike")) fun <- mlike else fun <- function(object) standardGeneric("mlike") setGeneric("mlike", fun) } setMethod("mlike", "rvm", function(object) object@mlike) setGeneric("mlike<-", function(x, value) standardGeneric("mlike<-")) setReplaceMethod("mlike", "rvm", function(x, value) { x@mlike <- value x }) setClass("inchol",representation("matrix", pivots="vector", diagresidues="vector", maxresiduals="vector"), prototype=structure(.Data=matrix(), pivots=vector(), diagresidues=vector(), maxresiduals=vector())) if(!isGeneric("pivots")){ if (is.function("pivots")) fun <- pivots else fun <- function(object) standardGeneric("pivots") setGeneric("pivots", fun) } setMethod("pivots", "inchol", function(object) object@pivots) setGeneric("pivots<-", function(x, value) standardGeneric("pivots<-")) setReplaceMethod("pivots", "inchol", function(x, value) { x@pivots <- value x }) if(!isGeneric("diagresidues")){ if (is.function("diagresidues")) fun <- diagresidues else fun <- function(object) standardGeneric("diagresidues") setGeneric("diagresidues", fun) } setMethod("diagresidues", "inchol", function(object) object@diagresidues) setGeneric("diagresidues<-", function(x,value) standardGeneric("diagresidues<-")) setReplaceMethod("diagresidues", "inchol", function(x, value) { x@diagresidues <- value x }) if(!isGeneric("maxresiduals")){ if (is.function("maxresiduals")) fun <- maxresiduals else fun <- function(object) standardGeneric("maxresiduals") setGeneric("maxresiduals", fun) } setMethod("maxresiduals", "inchol", function(object) object@maxresiduals) setGeneric("maxresiduals<-", function(x,value) standardGeneric("maxresiduals<-")) setReplaceMethod("maxresiduals", "inchol", function(x, value) { x@maxresiduals <- value x }) ## csi object 
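## (a sketch of the slots, inferred from csi.R: the inherited .Data matrix is
## the incomplete factor itself, Q and R hold its QR decomposition, and
## truegain/predgain record the exact and predicted gain of each chosen pivot)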
setClass("csi",representation(Q = "matrix", R = "matrix", truegain = "vector", predgain = "vector"),contains="inchol") if(!isGeneric("Q")){ if (is.function("Q")) fun <- Q else fun <- function(object) standardGeneric("Q") setGeneric("Q", fun) } setMethod("Q", "csi", function(object) object@Q) setGeneric("Q<-", function(x, value) standardGeneric("Q<-")) setReplaceMethod("Q", "csi", function(x, value) { x@Q <- value x }) if(!isGeneric("R")){ if (is.function("R")) fun <- R else fun <- function(object) standardGeneric("R") setGeneric("R", fun) } setMethod("R", "csi", function(object) object@R) setGeneric("R<-", function(x, value) standardGeneric("R<-")) setReplaceMethod("R", "csi", function(x, value) { x@R <- value x }) if(!isGeneric("truegain")){ if (is.function("truegain")) fun <- truegain else fun <- function(object) standardGeneric("truegain") setGeneric("truegain", fun) } setMethod("truegain", "csi", function(object) object@truegain) setGeneric("truegain<-", function(x, value) standardGeneric("truegain<-")) setReplaceMethod("truegain", "csi", function(x, value) { x@truegain <- value x }) if(!isGeneric("predgain")){ if (is.function("predgain")) fun <- predgain else fun <- function(object) standardGeneric("predgain") setGeneric("predgain", fun) } setMethod("predgain", "csi", function(object) object@predgain) setGeneric("predgain<-", function(x, value) standardGeneric("predgain<-")) setReplaceMethod("predgain", "csi", function(x, value) { x@predgain <- value x }) setClass("specc",representation("vector", centers="matrix", size="vector", kernelf="kfunction", withinss = "vector" ),prototype=structure(.Data=vector(), centers = matrix(), size=matrix(), kernelf = ls, withinss=vector())) if(!isGeneric("centers")){ if (is.function("centers")) fun <- centers else fun <- function(object) standardGeneric("centers") setGeneric("centers", fun) } setMethod("centers", "specc", function(object) object@centers) setGeneric("centers<-", function(x,value) standardGeneric("centers<-")) setReplaceMethod("centers", "specc", function(x, value) { x@centers <- value x }) if(!isGeneric("size")){ if (is.function("size")) fun <- size else fun <- function(object) standardGeneric("size") setGeneric("size", fun) } setMethod("size", "specc", function(object) object@size) setGeneric("size<-", function(x,value) standardGeneric("size<-")) setReplaceMethod("size", "specc", function(x, value) { x@size <- value x }) if(!isGeneric("withinss")){ if (is.function("withinss")) fun <- withinss else fun <- function(object) standardGeneric("withinss") setGeneric("withinss", fun) } setMethod("withinss", "specc", function(object) object@withinss) setGeneric("withinss<-", function(x,value) standardGeneric("withinss<-")) setReplaceMethod("withinss", "specc", function(x, value) { x@withinss <- value x }) setMethod("kernelf","specc", function(object) object@kernelf) setReplaceMethod("kernelf","specc", function(x, value){ x@kernelf <- value x }) setClass("ranking",representation("matrix", convergence="matrix", edgegraph="matrix"), prototype=structure(.Data=matrix(), convergence=matrix(), edgegraph=matrix())) if(!isGeneric("convergence")){ if (is.function("convergence")) fun <- convergence else fun <- function(object) standardGeneric("convergence") setGeneric("convergence", fun) } setMethod("convergence", "ranking", function(object) object@convergence) setGeneric("convergence<-", function(x,value) standardGeneric("convergence<-")) setReplaceMethod("convergence", "ranking", function(x, value) { x@convergence <- value x }) 
if(!isGeneric("edgegraph")){ if (is.function("edgegraph")) fun <- edgegraph else fun <- function(object) standardGeneric("edgegraph") setGeneric("edgegraph", fun) } setMethod("edgegraph", "ranking", function(object) object@edgegraph) setGeneric("edgegraph<-", function(x,value) standardGeneric("edgegraph<-")) setReplaceMethod("edgegraph", "ranking", function(x, value) { x@edgegraph <- value x }) ## online learning algorithms class setClass("onlearn", representation( kernelf = "kfunction", buffer = "numeric", kpar = "list", xmatrix = "matrix", fit = "numeric", onstart = "numeric", onstop = "numeric", alpha = "ANY", rho = "numeric", b = "numeric", pattern ="ANY", type="character" )) if(!isGeneric("fit")){ if (is.function("fit")) fun <- fit else fun <- function(object) standardGeneric("fit") setGeneric("fit", fun) } setMethod("fit","onlearn", function(object) object@fit) setGeneric("fit<-", function(x, value) standardGeneric("fit<-")) setReplaceMethod("fit","onlearn", function(x, value){ x@fit <- value x }) if(!isGeneric("onstart")){ if (is.function("onstart")) fun <- onstart else fun <- function(object) standardGeneric("onstart") setGeneric("onstart", fun) } setMethod("onstart", "onlearn", function(object) object@onstart) setGeneric("onstart<-", function(x, value) standardGeneric("onstart<-")) setReplaceMethod("onstart", "onlearn", function(x, value) { x@onstart <- value x }) if(!isGeneric("onstop")){ if (is.function("onstop")) fun <- onstop else fun <- function(object) standardGeneric("onstop") setGeneric("onstop", fun) } setMethod("onstop", "onlearn", function(object) object@onstop) setGeneric("onstop<-", function(x, value) standardGeneric("onstop<-")) setReplaceMethod("onstop", "onlearn", function(x, value) { x@onstop <- value x }) if(!isGeneric("buffer")){ if (is.function("buffer")) fun <- buffer else fun <- function(object) standardGeneric("buffer") setGeneric("buffer", fun) } setMethod("buffer", "onlearn", function(object) object@buffer) setGeneric("buffer<-", function(x, value) standardGeneric("buffer<-")) setReplaceMethod("buffer", "onlearn", function(x, value) { x@buffer <- value x }) setMethod("kernelf","onlearn", function(object) object@kernelf) setReplaceMethod("kernelf","onlearn", function(x, value){ x@kernelf <- value x }) setMethod("kpar","onlearn", function(object) object@kpar) setReplaceMethod("kpar","onlearn", function(x, value){ x@kpar <- value x }) setMethod("xmatrix","onlearn", function(object) object@xmatrix) setReplaceMethod("xmatrix","onlearn", function(x, value){ x@xmatrix <- value x }) setMethod("alpha","onlearn", function(object) object@alpha) setReplaceMethod("alpha","onlearn", function(x, value){ x@alpha <- value x }) setMethod("b","onlearn", function(object) object@b) setReplaceMethod("b","onlearn", function(x, value){ x@b <- value x }) setMethod("type","onlearn", function(object) object@type) setReplaceMethod("type","onlearn", function(x, value){ x@type <- value x }) if(!isGeneric("rho")){ if (is.function("rho")) fun <- rho else fun <- function(object) standardGeneric("rho") setGeneric("rho", fun) } setMethod("rho", "onlearn", function(object) object@rho) setGeneric("rho<-", function(x, value) standardGeneric("rho<-")) setReplaceMethod("rho", "onlearn", function(x, value) { x@rho <- value x }) if(!isGeneric("pattern")){ if (is.function("pattern")) fun <- pattern else fun <- function(object) standardGeneric("pattern") setGeneric("pattern", fun) } setMethod("pattern", "onlearn", function(object) object@pattern) setGeneric("pattern<-", function(x, value) 
standardGeneric("pattern<-")) setReplaceMethod("pattern", "onlearn", function(x, value) { x@pattern <- value x }) setClass("kfa",representation(alpha = "matrix", alphaindex = "vector", kernelf = "kfunction", xmatrix = "matrix", kcall = "call", terms = "ANY" )) setMethod("coef", "kfa", function(object, ...) object@alpha) setMethod("kernelf","kfa", function(object) object@kernelf) setReplaceMethod("kernelf","kfa", function(x, value){ x@kernelf <- value x }) setMethod("alphaindex","kfa", function(object) object@alphaindex) setReplaceMethod("alphaindex","kfa", function(x, value){ x@alphaindex <- value x }) setMethod("alpha","kfa", function(object) object@alpha) setReplaceMethod("alpha","kfa", function(x, value){ x@alpha <- value x }) setMethod("xmatrix","kfa", function(object) object@xmatrix) setReplaceMethod("xmatrix","kfa", function(x, value){ x@xmatrix <- value x }) setMethod("kcall","kfa", function(object) object@kcall) setReplaceMethod("kcall","kfa", function(x, value){ x@kcall <- value x }) setMethod("terms","kfa", function(x, ...) x@terms) setReplaceMethod("terms","kfa", function(x, value){ x@terms <- value x }) ## kernel hebbian algorithm object setClass("kha", representation(eskm ="vector"),contains="prc") ## accessor functions if(!isGeneric("eskm")){ if (is.function("eskm")) fun <- eskm else fun <- function(object) standardGeneric("eskm") setGeneric("eskm", fun) } setMethod("eskm", "kha", function(object) object@eskm) setGeneric("eskm<-", function(x, value) standardGeneric("eskm<-")) setReplaceMethod("eskm", "kha", function(x, value) { x@eskm <- value x }) kernlab/R/kmmd.R0000644000175100001440000002030012560371302013154 0ustar hornikusers## calculates the kernel maximum mean discrepancy for samples from two distributions ## author: alexandros karatzoglou setGeneric("kmmd",function(x,...) standardGeneric("kmmd")) setMethod("kmmd", signature(x = "matrix"), function(x, y, kernel="rbfdot",kpar="automatic", alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) 
{
  x <- as.matrix(x)
  y <- as.matrix(y)

  res <- new("kmmd")

  if(is.character(kernel)){
    kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix"))

    if(kernel == "matrix")
      if(dim(x)[1]==dim(x)[2])
        return(kmmd(x= as.kernelMatrix(x), y = y, Kxy = as.kernelMatrix(x)%*%y, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...))
      else
        stop(" kernel matrix not square!")

    if(is.character(kpar))
      if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot" || kernel == "besseldot" || kernel == "anovadot" || kernel == "splinedot") && kpar == "automatic")
        {
          cat(" Setting default kernel parameters ","\n")
          kpar <- list()
        }
  }

  if (!is.function(kernel))
    if (!is.list(kpar) && is.character(kpar) && (kernel == "laplacedot" || kernel == "rbfdot")){
      kp <- match.arg(kpar,"automatic")
      if(kp == "automatic")
        kpar <- list(sigma = sigest(rbind(x,y), scaled = FALSE)[2])
      cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n")
    }

  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }

  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

  m <- dim(x)[1]
  n <- dim(y)[1]

  N <- max(m,n)
  M <- min(m,n)

  Kxx <- kernelMatrix(kernel,x)
  Kyy <- kernelMatrix(kernel,y)
  Kxy <- kernelMatrix(kernel,x,y)

  resmmd <- .submmd(Kxx, Kyy, Kxy, alpha)

  H0(res) <- (resmmd$mmd1 > resmmd$D1)
  Radbound(res) <- resmmd$D1
  Asymbound(res) <- 0
  mmdstats(res)[1] <- resmmd$mmd1
  mmdstats(res)[2] <- resmmd$mmd3

  if(asymptotic){
    boundA <- .submmd3bound(Kxx, Kyy, Kxy, alpha, frac, ntimes, replace)
    AsympH0(res) <- (resmmd$mmd3 > boundA)
    Asymbound(res) <- boundA
  }

  kernelf(res) <- kernel
  return(res)
})

setMethod("kmmd",signature(x="list"),
          function(x, y, kernel="stringdot", kpar=list(type="spectrum",length=4), alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...)
          {
            if(!is(kernel,"kernel"))
              {
                if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
                kernel <- do.call(kernel, kpar)
              }

            if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

            Kxx <- kernelMatrix(kernel,x)
            Kyy <- kernelMatrix(kernel,y)
            Kxy <- kernelMatrix(kernel,x,y)

            ret <- kmmd(x=Kxx, y=Kyy, Kxy=Kxy, alpha=alpha, asymptotic=asymptotic, replace=replace, ntimes=ntimes, frac=frac)

            kernelf(ret) <- kernel
            return(ret)
          })

setMethod("kmmd",signature(x="kernelMatrix"),
          function (x, y, Kxy, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...)
          {
            res <- new("kmmd")

            resmmd <- .submmd(x, y, Kxy, alpha)

            H0(res) <- (resmmd$mmd1 > resmmd$D1)
            Radbound(res) <- resmmd$D1
            Asymbound(res) <- 0
            mmdstats(res)[1] <- resmmd$mmd1
            mmdstats(res)[2] <- resmmd$mmd3

            if(asymptotic){
              boundA <- .submmd3bound(x, y, Kxy, alpha, frac, ntimes, replace)
              ## the bootstrap bound is computed from the third-order statistic,
              ## so compare mmd3 against it, as in the matrix method above
              AsympH0(res) <- (resmmd$mmd3 > boundA)
              Asymbound(res) <- boundA
            }

            kernelf(res) <- " Kernel matrix used as input."
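            ## no kernel function is available in this method, so the
            ## "kfunction" slot (a union of function and character, see
            ## aobjects.R) carries a character marker that show() prints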
return(res) }) .submmd <- function(Kxx,Kyy, Kxy, alpha) { m <- dim(Kxx)[1] n <- dim(Kyy)[1] N <- max(m,n) M <- min(m,n) sumKxx <- sum(Kxx) if(m!=n) sumKxxM <- sum(Kxx[1:M,1:M]) else sumKxxM <- sumKxx dgxx <- diag(Kxx) sumKxxnd <- sumKxx - sum(dgxx) R <- max(dgxx) RM <- max(dgxx[1:M]) hu <- colSums(Kxx[1:M,1:M]) - dgxx[1:M] sumKyy <- sum(Kyy) if(m!=n) sumKyyM <- sum(Kyy[1:M,1:M]) else sumKyyM <- sumKyy dgyy <- diag(Kyy) sumKyynd <- sum(Kyy) - sum(dgyy) R <- max(R,dgyy) RM <- max(RM,dgyy[1:M]) # RM instead of R in original hu <- hu + colSums(Kyy[1:M,1:M]) - dgyy[1:M] sumKxy <- sum(Kxy) if (m!=n) sumKxyM <- sum(Kxy[1:M,1:M]) else sumKxyM <- sumKxy dg <- diag(Kxy) # up to M only hu <- hu - colSums(Kxy[1:M,1:M]) - colSums(t(Kxy[1:M,1:M])) + 2*dg # one sided sum mmd1 <- sqrt(max(0,sumKxx/(m*m) + sumKyy/(n*n) - 2/m/n* sumKxy)) mmd3 <- sum(hu)/M/(M-1) D1 <- 2*sqrt(RM/M)+sqrt(log(1/alpha)*4*RM/M) return(list(mmd1=mmd1,mmd3=mmd3,D1=D1)) } .submmd3bound <- function(Kxx,Kyy, Kxy, alpha, frac, ntimes, replace) { ## implements the bootstrapping approach to the MMD3 bound by shuffling ## the kernel matrix ## frac : fraction of data used for bootstrap ## ntimes : how many times MMD is to be evaluated m <- dim(Kxx)[1] n <- dim(Kyy)[1] M <- min(m,n) N <- max(m,n) poslabels <- 1:m neglabels <- (m+1):(m+n) ## bootstrap bootmmd3 <- rep(0,ntimes) for (i in 1:ntimes) { nsamples <- ceiling(frac*min(m,n)) xinds <- sample(1:m,nsamples,replace=replace) yinds <- sample(1:n,nsamples,replace=replace) newlab <- c(poslabels[xinds],neglabels[yinds]) samplenew <- sample(newlab, length(newlab), replace=FALSE) xinds <- samplenew[1:nsamples] yinds <- samplenew[(nsamples+1):length(samplenew)] newm <- length(xinds) newn <- length(yinds) newM <- min(newm,newn) ##get new kernel matrices (without concat to big matrix to save memory) xind1 <- xinds[xinds<=m] xind2 <- xinds[xinds>m]- m yind1 <- yinds[yinds<=m] yind2 <- yinds[yinds>m]-m ##Kxx (this should be implemented with kernelMult for memory efficiency) nKxx <- rbind(cbind(Kxx[xind1,xind1],Kxy[xind1,xind2]), cbind(t(Kxy[xind1,xind2]),Kyy[xind2,xind2])) dgxx <- diag(nKxx) hu <- colSums(nKxx[1:newM,1:newM]) - dgxx[1:newM] # one sided sum rm(nKxx) #Kyy nKyy <- rbind(cbind(Kxx[yind1,yind1],Kxy[yind1,yind2]), cbind(t(Kxy[yind1,yind2]), Kyy[yind2,yind2])) dgyy <- diag(nKyy) hu <- hu + colSums(nKyy[1:newM,1:newM]) - dgyy[1:newM] rm(nKyy) ## Kxy nKxy <- rbind(cbind(Kxx[yind1,xind1],Kxy[yind1,xind2]), cbind(t(Kxy[xind1,yind2]),Kyy[yind2,xind2])) dg <- diag(nKxy) hu <- hu - colSums(nKxy[1:newM,1:newM]) - colSums(t(nKxy[1:newM,1:newM])) + 2*dg rm(nKxy) ## now calculate mmd3 bootmmd3[i] <- sum(hu)/newM/(newM-1) } bootmmd3 <- sort(bootmmd3, decreasing=TRUE); aind <- floor(alpha*ntimes) ## better less than too much (-> floor); ## take threshold in between aind and the next smaller value: bound <- sum(bootmmd3[c(aind,aind+1)])/2; return(bound) } setMethod("show","kmmd", function(object){ cat("Kernel Maximum Mean Discrepancy object of class \"kmmd\"","\n","\n") show(kernelf(object)) if(is.logical(object@H0)){ cat("\n") cat("\n","H0 Hypothesis rejected : ", paste(H0(object))) cat("\n","Rademacher bound : ", paste(Radbound(object))) } cat("\n") if(Asymbound(object)!=0){ cat("\n","H0 Hypothesis rejected (based on Asymptotic bound): ", paste(AsympH0(object))) cat("\n","Asymptotic bound : ", paste(Asymbound(object))) } cat("\n","1st and 3rd order MMD Statistics : ", paste( mmdstats(object))) cat("\n") }) kernlab/R/lssvm.R0000644000175100001440000005506412676465003013421 0ustar hornikusers## reduced 
least squares support vector machines ## author : alexandros setGeneric("lssvm", function(x, ...) standardGeneric("lssvm")) setMethod("lssvm",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 ## no intercept x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- lssvm(x, y, scaled = scaled, ...) kcall(ret) <- cl attr(Terms,"intercept") <- 0 ## no intercept terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("lssvm",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- lssvm(x, ...) return(ret) }) setMethod("lssvm",signature(x="matrix"), function (x, y, scaled = TRUE, kernel = "rbfdot", kpar = "automatic", type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, ## prob.model = FALSE, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) { ## subsetting and na-handling for matrices ret <- new("lssvm") if (!missing(subset)) x <- x[subset,] df <- unique(na.action(data.frame(y, x))) y <- df[,1] x <- as.matrix(df[,-1]) n.action(ret) <- na.action if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type ## scaling, subsetting, and NA handling x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. 
Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] } } ncols <- ncol(x) m <- nrows <- nrow(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(lssvm(as.kernelMatrix(x), y = y,type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if(is(y,"vector")) { y <- as.matrix(y) if (nrows != nrow(y)) stop("x and y don't match.") } if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (nrows != length(y)) stop("x and y don't match.") } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 if(reduced == FALSE) { K <- kernelMatrix(kernel,x) KP <- K - (1/m)*colSums(K) beta <- solve((KP%*%K + m * tau * K), KP%*%ymat) b <- colMeans(ymat) - colMeans(K%*%beta) alphaindex(ret) <- 1:m } else { G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) rep <- sort(pivots(G),index.return=TRUE)$ix G <- G[rep,] GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[rep,,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alphaindex(ret) <- rep[1:dim(G)[2]] } alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau ## calculate class prob. ## if (prob.model& reduced== TRUE) # warning("Class Probapilities not supported for reduced model.) 
## if(prob.model & reduced == FALSE) ## { ## pos <- as.vector(ymat)==1 ## neg <- as.vector(ymat)==-1 ## ones <- rep(1,dim(x)[1]) ## onesneg <- ones[pos] <- 0 ## ones <- rep(1,dim(x)[1]) ## onespos <- ones[neg] <- 0 ##Kpos <- kernelMult(kernel,x,x[pos,],rep(1,sum(pos))) ##Kneg <- kernelMult(kernel,x,x[neg,],rep(1,sum(neg))) ## Kpos <- K[,pos]%*%rep(1,sum(pos)) ## Kneg <- K[,neg]%*%rep(1,sum(neg)) ## classmeans <- c(sum( Kpos * coef(ret)[pos] * as.vector(ymat)[pos]),sum( Kneg * coef(ret)[pos] * as.vector(ymat)[pos])) ## kneg <- K%*%onesneg ## kpos <- K%*%onespos ## M <- (diag(dim(x)[1])- (1/dim(x)[1])*rep(1,dim(x)[1])%*%t(rep(1,dim(x)[1]))) ## kcentered <- M%*%solve(diag(dim(x)[1]) - tau*M%*%K%*%M)%*%M ## prob.model(ret) <- list(Kpos=Kpos, Kneg=Kneg, kcentered=kcentered, classmeans=classmeans) ## } } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() kernelf(ret) <- kernel ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x[alphaindex(ret),,drop = FALSE] ymatrix(ret) <- y nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA scaling(ret) <- list(scaled = scaled, x.scale = x.scale) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL,reduced = reduced, tau=tau, tol=tol, rank = floor(rank/cross), delta = floor(delta/cross), scaled=FALSE, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) ## kernelMatrix interface setMethod("lssvm",signature(x="kernelMatrix"), function (x, y, type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...) 
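## kernelMatrix interface: x is a precomputed square kernel matrix, so no
## kernel/kpar arguments are taken; rank and delta are only used by the
## regression branch, which runs csi() directly on the matrix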
{ ## subsetting and na-handling for matrices ret <- new("lssvm") if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type ncols <- ncol(x) m <- nrows <- nrow(x) if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if (is(y,"vector")) { y <- as.matrix(y) if (nrows != nrow(y)) stop("x and y don't match.")} if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (nrows != length(y)) stop("x and y don't match.") } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 KP <- x - (1/m)*colSums(x) beta <- solve((KP%*%x + m * tau * x), KP%*%ymat) b <- colMeans(ymat) - colMeans(x%*%beta) alphaindex(ret) <- 1:m alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank , delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x ymatrix(ret) <- y kernelf(ret) <- "Kernel matrix used for training." nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,cind],y[cind],type = type(ret), tau=tau, rank = floor(rank/cross), delta = floor(delta/cross), cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,svindex,drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) ## list interface setMethod("lssvm",signature(x="list"), function (x, y, scaled = TRUE, kernel = "stringdot", kpar = list(length=4, lambda = 0.5), type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ..., subset) { ## subsetting and na-handling for matrices ret <- new("lssvm") if (!missing(subset)) x <- x[subset] if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type m <- nrows <- length(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot")) if(is.character(kpar)) if(kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot" || kernel == "rbfdot" || kernel == "laplacedot" ) { stop("List interface supports only the stringdot kernel.") } } if(is(kernel,"kernel")) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if (nrows != nrow(x)) stop("x and y don't match.") if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 if(reduced == FALSE) { K <- kernelMatrix(kernel,x) KP <- K - (1/m)*colSums(K) beta <- solve((KP%*%K + m * tau * K), KP%*%ymat) b <- colMeans(ymat) - colMeans(K%*%beta) alphaindex(ret) <- 1:m } else { G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alphaindex(ret) <- pivots(G)[1:dim(G)[2]] } alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use 
unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() kernelf(ret) <- kernel ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x[alphaindex(ret)] ymatrix(ret) <- y SVindex(ret) <- svindex nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL,reduced = reduced, tau=tau, tol=tol, rank = floor(rank/cross), delta = floor(delta/cross), scaled=FALSE, cross = 0, fit = FALSE ) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) #**************************************************************# setMethod("predict", signature(object = "lssvm"), function (object, newdata, type = "response", coupler = "minpair") { sc <- 0 type <- match.arg(type,c("response","probabilities","decision")) if (missing(newdata) && type!="response") return(fitted(object)) else if(missing(newdata)) { newdata <- xmatrix(object) sc <- 1 } ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { if(!is.matrix(newdata)) newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object)) } else newdata <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") p<-0 if (!is.null(scaling(object)$x.scale) && sc != 1) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale" ) if(is(newdata,"kernelMatrix")) res <- newdata %*% coef(object) - b(object) else res <- t(t(kernelMult(kernelf(object), newdata,xmatrix(object), alpha(object))) + b(object)) if(type == "response" && type(object)=="classification"){ predres <- max.col(res) return(factor (lev(object)[predres], levels = 
lev(object))) } if (type == "decision" || type(object)=="regression") return(res) if (type =="probabilities" && type(object)=="classification") { res - prob.model(object)$classmeans return(res) } }) #****************************************************************************************# setMethod("show","lssvm", function(object){ cat("Least Squares Support Vector Machine object of class \"lssvm\"","\n") cat("\n") cat(paste("problem type :",type(object), "\n")) cat(paste(" parameter : tau =",param(object)$tau, "\n")) cat("\n") show(kernelf(object)) cat(paste("\nNumber of data points used for training :", nSV(object),"\n")) if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),6),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),6),"\n") }) ##.partopro <- function(z,s,m){ ##return(2*pi*(1/sqrt((1/z)+s^2))*exp(-(m^2)/(2*((1/z)+s^2)))) ##} kernlab/R/kcca.R0000644000175100001440000000451012105726255013140 0ustar hornikusers## Simple kernel canonical corelation analysis ## author: alexandros karatzoglou setGeneric("kcca",function(x, y, kernel="rbfdot", kpar=list(sigma = 0.1), gamma=0.1, ncomps = 10, ...) standardGeneric("kcca")) setMethod("kcca", signature(x = "matrix"), function(x,y,kernel="rbfdot",kpar=list(sigma=0.1), gamma=0.1, ncomps =10, ...) { x <- as.matrix(x) y <- as.matrix(y) if(!(nrow(x)==nrow(y))) stop("Number of rows in x, y matrixes is not equal") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") Kx <- kernelMatrix(kernel,x) Ky <- kernelMatrix(kernel,y) n <- dim(Kx)[1] m <- 2 ## Generate LH VK <- matrix(0,n*2,n); VK[0:n,] <- Kx VK[(n+1):(2*n),] <- Ky LH <- tcrossprod(VK, VK) for (i in 1:m) LH[((i-1)*n+1):(i*n),((i-1)*n+1):(i*n)] <- 0 ## Generate RH RH <- matrix(0,n*m,n*m) RH[1:n,1:n] <- (Kx + diag(rep(gamma,n)))%*%Kx + diag(rep(1e-6,n)) RH[(n+1):(2*n),(n+1):(2*n)] <- (Ky + diag(rep(gamma,n)))%*%Ky + diag(rep(1e-6,n)) RH <- (RH+t(RH))/2 ei <- .gevd(LH,RH) ret <- new("kcca") kcor(ret) <- as.double(ei$gvalues[1:ncomps]) xcoef(ret) <- matrix(as.double(ei$gvectors[1:n,1:ncomps]),n) ycoef(ret) <- matrix(as.double(ei$gvectors[(n+1):(2*n),1:ncomps]),n) ## xvar(ret) <- rotated(xpca) %*% cca$xcoef ## yvar(ret) <- rotated(ypca) %*% cca$ycoef return(ret) }) ## gevd compute the generalized eigenvalue ## decomposition for (a,b) .gevd<-function(a,b=diag(nrow(a))) { bs<-.mfunc(b,function(x) .ginvx(sqrt(x))) ev<-eigen(bs%*%a%*%bs) return(list(gvalues=ev$values,gvectors=bs%*%ev$vectors)) } ## mfunc is a helper to compute matrix functions .mfunc<-function(a,fn=sqrt) { e<-eigen(a); y<-e$vectors; v<-e$values return(tcrossprod(y%*%diag(fn(v)),y)) } ## ginvx is a helper to compute reciprocals .ginvx<-function(x) {ifelse(x==0,0,1/x)} kernlab/R/rvm.R0000644000175100001440000004145712676465015013065 0ustar hornikusers## relevance vector machine ## author : alexandros setGeneric("rvm", function(x, ...) standardGeneric("rvm")) setMethod("rvm",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") ret <- rvm(x, y, ...) 
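## A minimal usage sketch for this formula interface (kept as a comment,
## illustrative only; the toy data frame `df' below is hypothetical):
## xs  <- seq(-20, 20, 0.5)
## df  <- data.frame(x = xs, y = sin(xs) + rnorm(length(xs), sd = 0.1))
## fit <- rvm(y ~ x, data = df, kernel = "rbfdot", kpar = list(sigma = 0.1))
## plot(df$x, df$y); lines(df$x, predict(fit, df), col = 2)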
kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("rvm",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- rvm(x, ...) ret }) setMethod("rvm",signature(x="list"), function (x, y, type = "regression", kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 5, var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") K <- kernelMatrix(kernel,x) ret <- rvm(x=K, y=y, kernel=kernel, alpha = alpha, var= var, var.fix = var.fix, iterations = iterations, verbosity = verbosity, tol = tol, minmaxdiff=minmaxdiff,cross=cross,fit=fit, na.action=na.action) kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) setMethod("rvm",signature(x="matrix"), function (x, y, type = "regression", kernel = "rbfdot", kpar = "automatic", alpha = ncol(as.matrix(x)), var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { ## subsetting and na-handling for matrices ret <- new("rvm") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- "regression" # in case of classification: transform factors into integers if (is.factor(y)) { stop("classification not supported with rvm, you can use ksvm(), lssvm() or gausspr()") } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("classification not supported with rvm, you can use ksvm(), lssvm() or gausspr()") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(rvm(as.kernelMatrix(x), y = y,type = type, alpha = alpha, var = var, # variance var.fix = var.fix, # fixed variance? iterations = iterations, # no. 
of iterations verbosity = verbosity, tol = tol, minmaxdiff = minmaxdiff, cross = cross, fit = fit ,subset ,na.action = na.omit, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(alpha) == m) thetavec <- 1/alpha else if (length(alpha) == 1) thetavec <- rep(1/alpha, m) else stop("length of initial alpha vector is wrong (has to be one or equal with number of train data") wvec <- rep(1, m) piter <- iterations*0.4 if (type(ret) == "regression") { K <- kernelMatrix(kernel, x) diag(K) <- diag(K)+ 10e-7 Kml <- crossprod(K, y) for (i in 1:iterations) { nzindex <- thetavec > tol thetavec [!nzindex] <- wvec [!nzindex] <- 0 Kr <- K [ ,nzindex, drop = FALSE] thetatmp <- thetavec[nzindex] n <- sum (nzindex) Rinv <- backsolve(chol(crossprod(Kr)/var + diag(1/thetatmp)),diag(1,n)) ## compute the new wvec coefficients wvec [nzindex] <- (Rinv %*% (crossprod(Rinv, Kml [nzindex])))/var diagSigma <- rowSums(Rinv^2) ## error err <- sum ((y - Kr %*% wvec [nzindex])^2) if(var < 2e-9) { warning("Model might be overfitted") break } ## log some information if (verbosity > 0) { log.det.Sigma.inv <- - 2 * sum (log (diag (Rinv))) ## compute the marginal likelihood to monitor convergence mlike <- -1/2 * (log.det.Sigma.inv + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp)) cat ("Marg. 
Likelihood =", formatC (mlike), "\tnRV=", n, "\tvar=", var, "\n") } ## compute zeta zeta <- 1 - diagSigma / thetatmp ## compute logtheta for convergence checking logtheta <- - log(thetavec[nzindex]) ## update thetavec if(i < piter){ thetavec [nzindex] <- wvec [nzindex]^2 / zeta thetavec [thetavec <= 0] <- 0 } else{ thetavec [nzindex] <- (wvec [nzindex]^2/zeta - diagSigma)/zeta thetavec [thetavec <= 0] <- 0 } ## Stop if largest alpha change is too small maxdiff <- max(abs(logtheta[thetavec[which(nzindex)]!=0] + log(thetavec[thetavec!=0]))) if(maxdiff < minmaxdiff) break; ## update variance if (!var.fix) { var <- err / (m - sum (zeta)) } } if(verbosity == 0) mlike(ret) <- drop(-1/2 * (-2*sum(log(diag(Rinv))) + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp))) nvar(ret) <- var error(ret) <- sqrt(err/m) if(fit) fitted(ret) <- Kr %*% wvec [nzindex] } if(type(ret)=="classification") { stop("classification with the relevance vector machine not implemented yet") } kcall(ret) <- match.call() kernelf(ret) <- kernel alpha(ret) <- wvec[nzindex] tol(ret) <- tol xmatrix(ret) <- x ymatrix(ret) <- y RVindex(ret) <- which(nzindex) nRV(ret) <- length(RVindex(ret)) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross!=0) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- rvm(x[cind,],factor (lev(ret)[y[cind]], levels = lev(ret)),type=type(ret),kernel=kernel,alpha = alpha,var = var, var.fix=var.fix, tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- rvm(x[cind,],y[cind],type=type(ret),kernel=kernel,tol=tol,alpha = alpha, var = var, var.fix=var.fix, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("rvm",signature(x="kernelMatrix"), function (x, y, type = "regression", alpha = ncol(as.matrix(x)), var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... 
,subset ) { ## subsetting and na-handling for matrices ret <- new("rvm") if (!missing(subset)) x <- as.kernelMatrix(x[subset,subset]) if (is.null(y)) stop("response y missing") ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- "regression" # in case of classification: transform factors into integers if (is.factor(y)) { stop("Claasification is not implemented, you can use ksvm(), gausspr() or lssvm()") } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(length(alpha) == m) thetavec <- 1/alpha else if (length(alpha) == 1) thetavec <- rep(1/alpha, m) else stop("length of initial alpha vector is wrong (has to be one or equal with number of train data") wvec <- rep(1, m) piter <- iterations*0.4 if (type(ret) == "regression") { Kml <- crossprod(x, y) for (i in 1:iterations) { nzindex <- thetavec > tol thetavec [!nzindex] <- wvec [!nzindex] <- 0 Kr <- x [ ,nzindex, drop = FALSE] thetatmp <- thetavec[nzindex] n <- sum (nzindex) Rinv <- backsolve(chol(crossprod(Kr)/var + diag(1/thetatmp)),diag(1,n)) ## compute the new wvec coefficients wvec [nzindex] <- (Rinv %*% (crossprod(Rinv, Kml [nzindex])))/var diagSigma <- rowSums(Rinv^2) ## error err <- sum ((y - Kr %*% wvec [nzindex])^2) if(var < 2e-9) { warning("Model might be overfitted") break } ## log some information if (verbosity > 0) { log.det.Sigma.inv <- - 2 * sum (log (diag (Rinv))) ## compute the marginal likelihood to monitor convergence mlike <- -1/2 * (log.det.Sigma.inv + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp)) cat ("Marg. Likelihood =", formatC (mlike), "\tnRV=", n, "\tvar=", var, "\n") } ## compute zeta zeta <- 1 - diagSigma / thetatmp ## compute logtheta for convergence checking logtheta <- - log(thetavec[nzindex]) ## update thetavec if(i < piter){ thetavec [nzindex] <- wvec [nzindex]^2 / zeta thetavec [thetavec <= 0] <- 0 } else{ thetavec [nzindex] <- (wvec [nzindex]^2/zeta - diagSigma)/zeta thetavec [thetavec <= 0] <- 0 } ## Stop if largest alpha change is too small maxdiff <- max(abs(logtheta[thetavec[which(nzindex)]!=0] + log(thetavec[thetavec!=0]))) if(maxdiff < minmaxdiff) break; ## update variance if (!var.fix) { var <- err / (m - sum (zeta)) } } if(verbosity == 0) mlike(ret) <- drop(-1/2 * (-2*sum(log(diag(Rinv))) + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp))) nvar(ret) <- var error(ret) <- sqrt(err/m) if(fit) fitted(ret) <- Kr %*% wvec [nzindex] } if(type(ret)=="classification") { stop("classification with the relevance vector machine not implemented yet") } kcall(ret) <- match.call() kernelf(ret) <- " Kernel Matrix used. 
\n" coef(ret) <- alpha(ret) <- wvec[nzindex] tol(ret) <- tol xmatrix(ret) <- x ymatrix(ret) <- y RVindex(ret) <- which(nzindex) nRV(ret) <- length(RVindex(ret)) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross!=0) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- rvm(as.kernelMatrix(x[cind,cind]),factor (lev(ret)[y[cind]], levels = lev(ret)),type=type(ret),alpha = alpha,var = var, var.fix=var.fix, tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind][,RVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- rvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret),tol=tol,alpha = alpha, var = var, var.fix=var.fix, cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind][,RVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m)/cross + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("predict", signature(object = "rvm"), function (object, newdata, ...) { if (missing(newdata)) return(fitted(object)) if(!is(newdata,"kernelMatrix") && !is(newdata,"list")){ ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") p<-0 } if(type(object) == "regression") { if(is(newdata,"kernelMatrix")) ret <- newdata %*% coef(object) - b(object) if(is(newdata,"list")) ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[RVindex(object)],alpha(object)) else ret <- kernelMult(kernelf(object),newdata,as.matrix(xmatrix(object)[RVindex(object),,drop=FALSE]),alpha(object)) } ret }) setMethod("show","rvm", function(object){ cat("Relevance Vector Machine object of class \"rvm\"","\n") cat("Problem type: regression","\n","\n") show(kernelf(object)) cat(paste("\nNumber of Relevance Vectors :", nRV(object),"\n")) cat("Variance : ",round(nvar(object),9)) cat("\n") if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),9),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),9),"\n") ##train error & loss }) kernlab/R/specc.R0000644000175100001440000002543412676465043013354 0ustar hornikusers## Spectral clustering ## author : alexandros setGeneric("specc",function(x, ...) standardGeneric("specc")) setMethod("specc", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- specc(x, ...) 
cl[[1]] <- as.name("specc") if(!is.null(na.act)) n.action(res) <- na.action return(res) }) setMethod("specc",signature(x="matrix"),function(x, centers, kernel = "rbfdot", kpar = "automatic", nystrom.red = FALSE, nystrom.sample = dim(x)[1]/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) { x <- na.action(x) rown <- rownames(x) x <- as.matrix(x) m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(is.character(kpar)) { kpar <- match.arg(kpar,c("automatic","local")) if(kpar == "automatic") { if (nystrom.red == TRUE) sam <- sample(1:m, floor(mod.sample*nystrom.sample)) else sam <- sample(1:m, floor(mod.sample*m)) sx <- unique(x[sam,]) ns <- dim(sx)[1] dota <- rowSums(sx*sx)/2 ktmp <- crossprod(t(sx)) for (i in 1:ns) ktmp[i,]<- 2*(-ktmp[i,] + dota + rep(dota[i], ns)) ## fix numerical prob. ktmp[ktmp<0] <- 0 ktmp <- sqrt(ktmp) kmax <- max(ktmp) kmin <- min(ktmp + diag(rep(Inf,dim(ktmp)[1]))) kmea <- mean(ktmp) lsmin <- log2(kmin) lsmax <- log2(kmax) midmax <- min(c(2*kmea, kmax)) midmin <- max(c(kmea/2,kmin)) rtmp <- c(seq(midmin,0.9*kmea,0.05*kmea), seq(kmea,midmax,0.08*kmea)) if ((lsmax - (Re(log2(midmax))+0.5)) < 0.5) step <- (lsmax - (Re(log2(midmax))+0.5)) else step <- 0.5 if (((Re(log2(midmin))-0.5)-lsmin) < 0.5 ) stepm <- ((Re(log2(midmin))-0.5) - lsmin) else stepm <- 0.5 tmpsig <- c(2^(seq(lsmin,(Re(log2(midmin))-0.5), stepm)), rtmp, 2^(seq(Re(log2(midmax))+0.5, lsmax,step))) diss <- matrix(rep(Inf,length(tmpsig)*nc),ncol=nc) for (i in 1:length(tmpsig)){ ka <- exp((-(ktmp^2))/(2*(tmpsig[i]^2))) diag(ka) <- 0 d <- 1/sqrt(rowSums(ka)) if(!any(d==Inf) && !any(is.na(d))&& (max(d)[1]-min(d)[1] < 10^4)) { l <- d * ka %*% diag(d) xi <- eigen(l,symmetric=TRUE)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) diss[i,] <- res$withinss } } ms <- which.min(rowSums(diss)) kernel <- rbfdot((tmpsig[ms]^(-2))/2) ## Compute Affinity Matrix if (nystrom.red == FALSE) km <- kernelMatrix(kernel, x) } if (kpar=="local") { if (nystrom.red == TRUE) stop ("Local Scaling not supported for nystrom reduction.") s <- rep(0,m) dota <- rowSums(x*x)/2 dis <- crossprod(t(x)) for (i in 1:m) dis[i,]<- 2*(-dis[i,] + dota + rep(dota[i],m)) ## fix numerical prob. 
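## (Locally scaled affinities, cf. Zelnik-Manor & Perona's self-tuning
## spectral clustering: after clipping the squared distances below,
## every point i receives its own scale s_i -- the median over its five
## smallest distances -- and the affinity becomes
## exp(-d_ij^2 / (s_i * s_j)) rather than using one global sigma.)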
dis[dis < 0] <- 0 for (i in 1:m) s[i] <- median(sort(sqrt(dis[i,]))[1:5]) ## Compute Affinity Matrix km <- exp(-dis / s%*%t(s)) kernel <- "Localy scaled RBF kernel" } } else { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Compute Affinity Matrix if (nystrom.red == FALSE) km <- kernelMatrix(kernel, x) } if (nystrom.red == TRUE){ n <- floor(nystrom.sample) ind <- sample(1:m, m) x <- x[ind,] tmps <- sort(ind, index.return = TRUE) reind <- tmps$ix A <- kernelMatrix(kernel, x[1:n,]) B <- kernelMatrix(kernel, x[-(1:n),], x[1:n,]) d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi[reind,], centers, iterations) } else{ if(is(kernel)[1] == "rbfkernel") diag(km) <- 0 d <- 1/sqrt(rowSums(km)) l <- d * km %*% diag(d) xi <- eigen(l)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) } cent <- matrix(unlist(lapply(1:nc,ll<- function(l){colMeans(x[which(res$cluster==l), ,drop=FALSE])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:nc,ll<- function(l){sum((x[which(res$cluster==l),, drop=FALSE] - cent[l,])^2)})) names(res$cluster) <- rown return(new("specc", .Data=res$cluster, size = res$size, centers=cent, withinss=withss, kernelf= kernel)) }) setMethod("specc",signature(x="list"),function(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), nystrom.red = FALSE, nystrom.sample = length(x)/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) 
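## A minimal usage sketch for the list method whose body follows (kept
## as a comment, illustrative only; `reuters' is a list of text
## documents shipped with the package):
## data(reuters)
## sc <- specc(reuters, centers = 2, kernel = "stringdot",
##             kpar = list(length = 4, lambda = 0.5))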
{ x <- na.action(x) m <- length(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (nystrom.red == TRUE){ n <- nystrom.sample ind <- sample(1:m, m) x <- x[ind,] tmps <- sort(ind, index.return = TRUE) reind <- tmps$ix A <- kernelMatrix(kernel, x[1:n,]) B <- kernelMatrix(kernel, x[-(1:n),], x[1:n,]) d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi[reind,], centers, iterations) } else{ ## Compute Affinity Matrix / in our case just the kernel matrix km <- kernelMatrix(kernel, x) if(is(kernel)[1] == "rbfkernel") diag(km) <- 0 d <- 1/sqrt(rowSums(km)) l <- d * km %*% diag(d) xi <- eigen(l)$vectors[,1:nc] sqxi <- rowSums(xi^2) if(any(sqxi==0)) stop("Zero eigenvector elements, try using a lower value for the length hyper-parameter") yi <- xi/sqrt(sqxi) res <- kmeans(yi, centers, iterations) } return(new("specc", .Data=res$cluster, size = res$size, kernelf= kernel)) }) setMethod("specc",signature(x="kernelMatrix"),function(x, centers, nystrom.red = FALSE, iterations = 200, ...) 
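## A minimal usage sketch for the kernelMatrix method whose body
## follows (kept as a comment, illustrative only; `spirals' ships with
## the package and sigma = 100 is an arbitrary illustrative bandwidth):
## data(spirals)
## K  <- kernelMatrix(rbfdot(sigma = 100), spirals)
## sc <- specc(K, centers = 2)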
{ m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(dim(x)[1]!=dim(x)[2]) { nystrom.red <- TRUE if(dim(x)[1] < dim(x)[2]) x <- t(x) m <- nrow(x) n <- ncol(x) } if (nystrom.red == TRUE){ A <- x[1:n,] B <- x[-(1:n),] d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi, centers, iterations) } else{ d <- 1/sqrt(rowSums(x)) l <- d * x %*% diag(d) xi <- eigen(l)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) } ## cent <- matrix(unlist(lapply(1:nc,ll<- function(l){colMeans(x[which(res$cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) ## withss <- unlist(lapply(1:nc,ll<- function(l){sum((x[which(res$cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=res$cluster, size = res$size, centers = matrix(0), withinss = c(0), kernelf= "Kernel Matrix used as input.")) }) setMethod("show","specc", function(object){ cat("Spectral Clustering object of class \"specc\"","\n") cat("\n","Cluster memberships:","\n","\n") cat(object@.Data,"\n","\n") show(kernelf(object)) cat("\n") if(!any(is.na(centers(object)))){ cat(paste("Centers: ","\n")) show(centers(object)) cat("\n")} cat(paste("Cluster size: ","\n")) show(size(object)) cat("\n") if(!is.logical(withinss(object))){ cat(paste("Within-cluster sum of squares: ", "\n")) show(withinss(object)) cat("\n")} }) .ginv <- function (X, tol = sqrt(.Machine$double.eps)) { if (length(dim(X)) > 2 || !(is.numeric(X) || is.complex(X))) stop("'X' must be a numeric or complex matrix") if (!is.matrix(X)) X <- as.matrix(X) Xsvd <- svd(X) if (is.complex(X)) Xsvd$u <- Conj(Xsvd$u) Positive <- Xsvd$d > max(tol * Xsvd$d[1], 0) if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u)) else if (!any(Positive)) array(0, dim(X)[2:1]) else Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop = FALSE])) } .sqrtm <- function(x) { tmpres <- eigen(x) V <- t(tmpres$vectors) D <- tmpres$values if(is.complex(D)) D <- Re(D) D <- pmax(D,0) return(crossprod(V*sqrt(D),V)) } kernlab/R/ipop.R0000644000175100001440000002544511304023134013202 0ustar hornikusers##ipop solves the quadratic programming problem ##minimize c' * primal + 1/2 primal' * H * primal ##subject to b <= A*primal <= b + r ## l <= x <= u ## d is the optimizer itself ##returns primal and dual variables (i.e. x and the Lagrange ##multipliers for b <= A * primal <= b + r) ##for additional documentation see ## R. Vanderbei ## LOQO: an Interior Point Code for Quadratic Programming, 1992 ## Author: R version Alexandros Karatzoglou, orig. matlab Alex J. 
Smola ## Created: 12/12/97 ## R Version: 12/08/03 ## Updated: 13/10/05 ## This code is released under the GNU Public License setGeneric("ipop",function(c, H, A, b, l, u, r, sigf=7, maxiter=40, margin=0.05, bound=10, verb=0) standardGeneric("ipop")) setMethod("ipop",signature(H="matrix"), function(c, H, A, b, l, u, r, sigf=7, maxiter=40, margin=0.05, bound=10, verb=0) { if(!is.matrix(H)) stop("H must be a matrix") if(!is.matrix(A)&&!is.vector(A)) stop("A must be a matrix or a vector") if(!is.matrix(c)&&!is.vector(c)) stop("c must be a matrix or a vector") if(!is.matrix(l)&&!is.vector(l)) stop("l must be a matrix or a vector") if(!is.matrix(u)&&!is.vector(u)) stop("u must be a matrix or a vector") n <- dim(H)[1] ## check for a decomposed H matrix if(n == dim(H)[2]) smw <- 0 if(n > dim(H)[2]) smw <- 1 if(n < dim(H)[2]) { smw <- 1 n <- dim(H)[2] H <- t(H) } if (is.vector(A)) A <- matrix(A,1) m <- dim(A)[1] primal <- rep(0,n) if (missing(b)) bvec <- rep(0, m) ## if(n !=nrow(H)) ## stop("H matrix is not symmetric") if (n != length(c)) stop("H and c are incompatible!") if (n != ncol(A)) stop("A and c are incompatible!") if (m != length(b)) stop("A and b are incompatible!") if(n !=length(u)) stop("u is incopatible with H") if(n !=length(l)) stop("l is incopatible with H") c <- matrix(c) l <- matrix(l) u <- matrix(u) m <- nrow(A) n <- ncol(A) H.diag <- diag(H) if(smw == 0) H.x <- H else if (smw == 1) H.x <- t(H) b.plus.1 <- max(svd(b)$d) + 1 c.plus.1 <- max(svd(c)$d) + 1 one.x <- -matrix(1,n,1) one.y <- -matrix(1,m,1) ## starting point if(smw == 0) diag(H.x) <- H.diag + 1 else smwn <- dim(H)[2] H.y <- diag(1,m) c.x <- c c.y <- b ## solve the system [-H.x A' A H.y] [x, y] = [c.x c.y] if(smw == 0) { AP <- matrix(0,m+n,m+n) xp <- 1:(m+n) <= n AP[xp,xp] <- -H.x AP[xp == FALSE,xp] <- A AP[xp,xp == FALSE] <- t(A) AP[xp == FALSE, xp== FALSE] <- H.y s.tmp <- solve(AP,c(c.x,c.y)) x <- s.tmp[1:n] y <- s.tmp[-(1:n)] } else { V <- diag(smwn) smwinner <- chol(V + crossprod(H)) smwa1 <- t(A) smwc1 <- c.x smwa2 <- smwa1 - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwa1)))) smwc2 <- smwc1 - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1)))) y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2) x <- smwa2 %*% y - smwc2 } g <- pmax(abs(x - l), bound) z <- pmax(abs(x), bound) t <- pmax(abs(u - x), bound) s <- pmax(abs(x), bound) v <- pmax(abs(y), bound) w <- pmax(abs(y), bound) p <- pmax(abs(r - w), bound) q <- pmax(abs(y), bound) mu <- as.vector(crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n)) sigfig <- 0 counter <- 0 alfa <- 1 if (verb > 0) # print at least one status report cat("Iter PrimalInf DualInf SigFigs Rescale PrimalObj DualObj","\n") while (counter < maxiter) { ## update the iteration counter counter <- counter + 1 ## central path (predictor) if(smw == 0) H.dot.x <- H %*% x else if (smw == 1) H.dot.x <- H %*% crossprod(H,x) rho <- b - A %*% x + w nu <- l - x + g tau <- u - x - t alpha <- r - w - p sigma <- c - crossprod(A, y) - z + s + H.dot.x beta <- y + q - v gamma.z <- - z gamma.w <- - w gamma.s <- - s gamma.q <- - q ## instrumentation x.dot.H.dot.x <- crossprod(x, H.dot.x) primal.infeasibility <- max(svd(rbind(rho, tau, matrix(alpha), nu))$d)/ b.plus.1 dual.infeasibility <- max(svd(rbind(sigma,t(t(beta))))$d) / c.plus.1 primal.obj <- crossprod(c,x) + 0.5 * x.dot.H.dot.x dual.obj <- crossprod(b,y) - 0.5 * x.dot.H.dot.x + crossprod(l, z) - crossprod(u,s) - crossprod(r,q) old.sigfig <- sigfig sigfig <- max(-log10(abs(primal.obj - dual.obj)/(abs(primal.obj) 
+ 1)), 0) if (sigfig >= sigf) break if (verb > 0) # final report cat( counter, "\t", signif(primal.infeasibility,6), signif(dual.infeasibility,6), sigfig, alfa, primal.obj, dual.obj,"\n") ## some more intermediate variables (the hat section) hat.beta <- beta - v * gamma.w / w hat.alpha <- alpha - p * gamma.q / q hat.nu <- nu + g * gamma.z / z hat.tau <- tau - t * gamma.s / s ## the diagonal terms d <- z / g + s / t e <- 1 / (v / w + q / p) ## initialization before the big cholesky if (smw == 0) diag(H.x) <- H.diag + d diag(H.y) <- e c.x <- sigma - z * hat.nu / g - s * hat.tau / t c.y <- rho - e * (hat.beta - q * hat.alpha / p) ## and solve the system [-H.x A' A H.y] [delta.x, delta.y] <- [c.x c.y] if(smw == 0){ AP[xp,xp] <- -H.x AP[xp == FALSE, xp== FALSE] <- H.y s1.tmp <- solve(AP,c(c.x,c.y)) delta.x<-s1.tmp[1:n] ; delta.y <- s1.tmp[-(1:n)] } else { V <- diag(smwn) smwinner <- chol(V + chunkmult(t(H),2000,d)) smwa1 <- t(A) smwa1 <- smwa1 / d smwc1 <- c.x / d smwa2 <- t(A) - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwa1)))) smwa2 <- smwa2 / d smwc2 <- (c.x - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1)))))/d delta.y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2) delta.x <- smwa2 %*% delta.y - smwc2 } ## backsubstitution delta.w <- - e * (hat.beta - q * hat.alpha / p + delta.y) delta.s <- s * (delta.x - hat.tau) / t delta.z <- z * (hat.nu - delta.x) / g delta.q <- q * (delta.w - hat.alpha) / p delta.v <- v * (gamma.w - delta.w) / w delta.p <- p * (gamma.q - delta.q) / q delta.g <- g * (gamma.z - delta.z) / z delta.t <- t * (gamma.s - delta.s) / s ## compute update step now (sebastian's trick) alfa <- - (1 - margin) / min(c(delta.g / g, delta.w / w, delta.t / t, delta.p / p, delta.z / z, delta.v / v, delta.s / s, delta.q / q, -1)) newmu <- (crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n)) newmu <- mu * ((alfa - 1) / (alfa + 10))^2 gamma.z <- mu / g - z - delta.z * delta.g / g gamma.w <- mu / v - w - delta.w * delta.v / v gamma.s <- mu / t - s - delta.s * delta.t / t gamma.q <- mu / p - q - delta.q * delta.p / p ## some more intermediate variables (the hat section) hat.beta <- beta - v * gamma.w / w hat.alpha <- alpha - p * gamma.q / q hat.nu <- nu + g * gamma.z / z hat.tau <- tau - t * gamma.s / s ## initialization before the big cholesky ##for ( i in 1 : n H.x(i,i) <- H.diag(i) + d(i) ) { ##H.y <- diag(e) c.x <- sigma - z * hat.nu / g - s * hat.tau / t c.y <- rho - e * (hat.beta - q * hat.alpha / p) ## and solve the system [-H.x A' A H.y] [delta.x, delta.y] <- [c.x c.y] if (smw == 0) { AP[xp,xp] <- -H.x AP[xp == FALSE, xp== FALSE] <- H.y s1.tmp <- solve(AP,c(c.x,c.y)) delta.x<-s1.tmp[1:n] ; delta.y<-s1.tmp[-(1:n)] } else if (smw == 1) { smwc1 <- c.x / d smwc2 <- (c.x - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1))))) / d delta.y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2) delta.x <- smwa2 %*% delta.y - smwc2 } ## backsubstitution delta.w <- - e * (hat.beta - q * hat.alpha / p + delta.y) delta.s <- s * (delta.x - hat.tau) / t delta.z <- z * (hat.nu - delta.x) / g delta.q <- q * (delta.w - hat.alpha) / p delta.v <- v * (gamma.w - delta.w) / w delta.p <- p * (gamma.q - delta.q) / q delta.g <- g * (gamma.z - delta.z) / z delta.t <- t * (gamma.s - delta.s) / s ## compute the updates alfa <- - (1 - margin) / min(c(delta.g / g, delta.w / w, delta.t / t, delta.p / p, delta.z / z, delta.v / v, delta.s / s, delta.q / q, -1)) x <- x + delta.x * alfa g <- g + delta.g * alfa w <- w + delta.w * alfa t <- t + delta.t * alfa p <- 
p + delta.p * alfa y <- y + delta.y * alfa z <- z + delta.z * alfa v <- v + delta.v * alfa s <- s + delta.s * alfa q <- q + delta.q * alfa ## these two lines put back in ? ## mu <- (crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n)) ## mu <- mu * ((alfa - 1) / (alfa + 10))^2 mu <- newmu } if (verb > 0) ## final report cat( counter, primal.infeasibility, dual.infeasibility, sigfig, alfa, primal.obj, dual.obj) ret <- new("ipop") ## repackage the results primal(ret) <- x dual(ret) <- drop(y) if ((sigfig > sigf) & (counter < maxiter)) how(ret) <- 'converged' else { ## must have run out of counts if ((primal.infeasibility > 10e5) & (dual.infeasibility > 10e5)) how(ret) <- 'primal and dual infeasible' if (primal.infeasibility > 10e5) how(ret) <- 'primal infeasible' if (dual.infeasibility > 10e5) how(ret) <- 'dual infeasible' else ## don't really know how(ret) <- 'slow convergence, change bound?' } ret }) setGeneric("chunkmult",function(Z, csize, colscale) standardGeneric("chunkmult")) setMethod("chunkmult",signature(Z="matrix"), function(Z, csize, colscale) { n <- dim(Z)[1] m <- dim(Z)[2] d <- sqrt(colscale) nchunks <- ceiling(m/csize) res <- matrix(0,n,n) for( i in 1:nchunks) { lowerb <- (i - 1) * csize + 1 upperb <- min(i * csize, m) buffer <- t(Z[,lowerb:upperb,drop = FALSE]) bufferd <- d[lowerb:upperb] buffer <- buffer / bufferd res <- res + crossprod(buffer) } return(res) }) kernlab/R/kkmeans.R0000644000175100001440000004651312676464725013717 0ustar hornikusers## kernel kmeans function ## author: alexandros setGeneric("kkmeans",function(x, ...) standardGeneric("kkmeans")) setMethod("kkmeans", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- kkmeans(x, ...) cl[[1]] <- as.name("kkmeans") if(!is.null(na.act)) n.action(res) <- na.action return(res) }) setMethod("kkmeans",signature(x="matrix"),function(x, centers, kernel = "rbfdot", kpar = "automatic", alg ="kkmeans", p = 1, na.action = na.omit, ...) 
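## A minimal usage sketch for the matrix method whose body follows
## (kept as a comment, illustrative only; `spirals' ships with the
## package):
## data(spirals)
## kk <- kkmeans(spirals, centers = 2)
## centers(kk)
## size(kk)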
{ x <- na.action(x) rown <- rownames(x) x <- as.matrix(x) m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(kkmeans(as.kernelMatrix(x), centers= centers)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot"||kernel=="stringdot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else { ncenters <- ns <- dim(centers)[1] dota <- rowSums(x*x)/2 dotb <- rowSums(centers*centers)/2 ktmp <- x%*%t(centers) for(i in 1:ns) ktmp[,i]<- ktmp[,i] - dota - rep(dotb[i],m) prts <- max.col(ktmp) vgr <- vgr2 <- lapply(1:ns, function(x) which(x==prts)) } if(is.character(alg)) alg <- match.arg(alg,c("kkmeans","kerninghan", "normcut")) if(alg == "kkmeans") { p <- NULL D <- NULL D1 <- NULL w <- rep(1,m) } if(alg=="kerninghan") { p <- p D <- kernelMult(kernel,x, , rep(1,m)) w <- rep(1,m) D1 <- NULL } if(alg=="normcut") { p <- p D1 <- 1 w <- kernelMult(kernel,x, , rep(1,m)) } ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## calculate diagonal kdiag <- rep(1,m) for (i in 1:m) kdiag[i] <- drop(kernel(x[i,],x[i,])) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 
1 secsum[i] <- sum(affinMult(kernel, x[vgr[[i]],,drop=FALSE],,w[vgr[[i]]], p , D, D1) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * affinMult(kernel,x,x[vgr[[i]],,drop=FALSE], w[vgr[[i]]], p ,D, D1)/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum(affinMult(kernel, x[vgr2[[z]],,drop=FALSE], x[vgr[[z]],,drop=FALSE], w[vgr[[z]]], p, D, D1)*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(affinMult(kernel, x[vgr[[z]],,drop=FALSE], ,w[vgr[[z]]], p, D, D1) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(affinMult(kernel, x[vgr2[[z]],,drop=FALSE], ,w[vgr2[[z]]], p, D, D1) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc)) < 1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every poit to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 1 secsum[u] <- sum(affinMult(kernel, x[vgr[[u]],,drop=FALSE], ,w[vgr[[u]]], p, D, D1) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * affinMult(kernel,x[compin,],x[vgr[[u]],,drop=FALSE], w[vgr[[u]]], p , D, D1)/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) names(cluster) <- rown return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= kernel)) }) ## kernel Matrix interface setMethod("kkmeans",signature(x="kernelMatrix"),function(x, centers, ...) { m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else ncenters <- dim(centers)[1] ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## diagonal kdiag <- diag(x) ## weigths (should be adapted for future versions !!) w <- rep(1,m) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 
1 secsum[i] <- sum(drop(crossprod(x[vgr[[i]],vgr[[i]],drop=FALSE],w[vgr[[i]]])) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * x[,vgr[[i]],drop=FALSE]%*%w[vgr[[i]]]/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum((x[vgr2[[z]],vgr[[z]],drop=FALSE] %*% w[vgr[[z]]])*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(drop(crossprod(x[vgr[[z]],vgr[[z]],drop=FALSE],w[vgr[[z]]])) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(drop(crossprod(x[vgr2[[z]],vgr2[[z]],drop=FALSE],w[vgr2[[z]]])) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc))<1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every point to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 1 secsum[u] <- sum(drop(crossprod(x[vgr[[u]],vgr[[u]],drop=FALSE],w[vgr[[u]]])) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * (x[which(compin),vgr[[u]],drop=FALSE] %*% w[vgr[[u]]])/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= "Kernel matrix used")) }) ## List interface setMethod("kkmeans",signature(x="list"),function(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), alg ="kkmeans", p = 1, na.action = na.omit, ...) { x <- na.action(x) m <- length(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else ncenters <- dim(centers)[1] if(is.character(alg)) alg <- match.arg(alg,c("kkmeans","kerninghan", "normcut")) if(alg == "kkmeans") { p <- NULL D <- NULL D1 <- NULL w <- rep(1,m) } if(alg=="kerninghan") { p <- p D <- kernelMult(kernel,x, , rep(1,m)) w <- rep(1,m) D1 <- NULL } if(alg=="normcut") { p <- p D1 <- 1 w <- kernelMult(kernel,x, , rep(1,m)) } ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## calculate diagonal kdiag <- rep(1,m) for (i in 1:m) kdiag[i] <- drop(kernel(x[[i]],x[[i]])) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 
1 secsum[i] <- sum(affinMult(kernel, x[vgr[[i]]],,w[vgr[[i]]], p , D, D1) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * affinMult(kernel,x,x[vgr[[i]]], w[vgr[[i]]], p ,D, D1)/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum(affinMult(kernel, x[vgr2[[z]]], x[vgr[[z]]], w[vgr[[z]]], p, D, D1)*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(affinMult(kernel, x[vgr[[z]]], ,w[vgr[[z]]], p, D, D1) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(affinMult(kernel, x[vgr2[[z]]], ,w[vgr2[[z]]], p, D, D1) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc))<1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every poit to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 1 secsum[u] <- sum(affinMult(kernel, x[vgr[[u]]], ,w[vgr[[u]]], p, D, D1) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * affinMult(kernel,x[compin,],x[vgr[[u]]], w[vgr[[u]]], p , D, D1)/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= kernel)) }) setGeneric("affinMult",function(kernel, x, y = NULL, z, p, D, D1, blocksize = 256) standardGeneric("affinMult")) affinMult.rbfkernel <- function(kernel, x, y=NULL, z, p, D, D1,blocksize = 256) { if(is.null(p)&is.null(D)&is.null(D1)) res <- kernelMult(kernel,x,y,z) else{ if(!is.matrix(y)&&!is.null(y)) stop("y must be a matrix") if(!is.matrix(z)&&!is.vector(z)) stop("z must be a matrix or a vector") sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y) & is.null(D1)) { if(is.vector(z)) { if(!length(z) == n) stop("vector z length must be equal to x rows") z <- matrix(z,n,1) } if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n))))%*%z - z[lowerl:upperl,]*(1-p) lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n))))%*%z- z[lowerl:upperl,]*(1-p) } if(is.matrix(y) & is.null(D1)) { n2 <- dim(y)[1] if(is.vector(z)) { if(!length(z) == n2) stop("vector z length must be equal to y rows") z <- matrix(z,n2,1) } if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { 
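## (Blockwise evaluation: the loop below builds `blocksize' rows of the
## RBF affinity matrix at a time from the precomputed squared norms and
## multiplies each block with z straight away, so the full kernel
## matrix is never held in memory at once.)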
dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize if(upperl < n2) res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2))))%*%z-z[lowerl:upperl,]*(1-p) - z[lowerl:upperl,]*D[lowerl:upperl] if(upperl >n2 & lowerl n2 & n>=n2){ res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z res[lowerl:n2,] <- res[lowerl:n2,] - z[lowerl:n2,]*(1-p) - z[lowerl:n2,]*D[lowerl:n2] } else res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z } } if (is.null(y) & !is.null(D1)) { if(is.vector(z)) { if(!length(z) == n) stop("vector z length must be equal to x rows") z <- matrix(z,n,1) } if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize tmp <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)))) D1 <- 1/colSums(tmp) res[lowerl:upperl,] <- D1*tmp%*%diag(D1)%*%z - z[lowerl:upperl,]*(1-D1) lowerl <- upperl + 1 } } if(lowerl <= n){ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)))) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z- z[lowerl:upperl,]*(1-D1) } } if(is.matrix(y) &!is.null(D1)) { n2 <- dim(y)[1] if(is.vector(z)) { if(!length(z) == n2) stop("vector z length must be equal to y rows") z <- matrix(z,n2,1) } if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) ones <- rep(1,blocksize) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize if(upperl < n2) tmp <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:upperl,] <- D1*tmp%*%diag(D1)%*%z-z[lowerl:upperl,]*(1-D1) if(upperl >n2 & lowerl n2 & n>=n2){ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(y) -rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z res[lowerl:n2,] <- res[lowerl:n2,] - z[lowerl:n2,]*(1-D1) } else{ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(y) -rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z } } } } return(res) } setMethod("affinMult",signature(kernel="kernel", x="matrix"),affinMult.rbfkernel) kernlab/R/couplers.R0000644000175100001440000000770211304023134014063 0ustar hornikusers## wrapper function for couplers ## author : alexandros karatzoglou couple <- function(probin, coupler = "minpair") { if(is.vector(probin)) probin <- matrix(probin,1) m <- dim(probin)[1] coupler <- match.arg(coupler, c("minpair", "pkpd", "vote", "ht")) # if(coupler == "ht") # multiprob <- sapply(1:m, function(x) do.call(coupler, list(probin[x ,], clscnt))) # else multiprob <- sapply(1:m, function(x) do.call(coupler, list(probin[x ,]))) return(t(multiprob)) } ht <- function(probin, clscnt, iter=1000) { nclass <- length(clscnt) probim <- matrix(0, nclass, nclass) for(i in 1:nclass) for(j in 1:nclass) if(j>i) { probim[i,j] <- probin[i] probim[j,i] <- 1 - probin[i] } p <- rep(1/nclass,nclass) u <- matrix((1/nclass)/((1/nclass)+(1/nclass)) ,nclass,nclass) iter <- 0 while(TRUE) { iter <- iter + 1 stoperror <- 0 for(i in 1:nclass){ num <- den <- 0 for(j in 
1:nclass) { if (j!=i) { num <- num + (clscnt[i] + clscnt[j]) * probim[i,j] den <- den + (clscnt[i] + clscnt[j]) * u[i,j] } } alpha <- num/(den + 1e-308) p[i] <- p[i]*alpha stoperror <- stoperror + (alpha -1)^2 if(0) { sum <- 0 sum <- sum(p) + sum p <- p/sum for(ui in 1:nclass) for(uj in 1:nclass) u[ui, uj] <- p[ui]/(p[ui] + p[uj]) } else { for(j in 1:nclass) if (i!=j) { u[i,j] <- p[i]/(p[i] + p[j]) u[j,i] <- 1 - u[i,j] } } } if(stoperror < 1e-3) break if(iter > 400) { cat("Too many iterations: aborting", probin, iter, stoperror, p) break } } ## normalize prob. p <- p/sum(p) return(p) } minpair <- function(probin) { ## Count number of classes and construct prob. matrix nclass <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") probim <- matrix(0, nclass, nclass) probim[upper.tri(probim)] <- probin probim[lower.tri(probim)] <- 1 - probin sum <- colSums(probim^2) Q <- diag(sum) Q[upper.tri(Q)] <- - probin*(1 - probin) Q[lower.tri(Q)] <- - probin*(1 - probin) SQ <- matrix(0,nclass +1, nclass +1) SQ[1:(nclass+1) <= nclass, 1:(nclass+1) <= nclass] <- Q SQ[1:(nclass+1) > nclass, 1:(nclass+1) <= nclass] <- rep(1,nclass) SQ[1:(nclass+1) <= nclass, 1:(nclass+1) > nclass] <- rep(1,nclass) rhs <- rep(0,nclass+1) rhs[nclass + 1] <- 1 p <- solve(SQ,rhs) p <- p[-(nclass+1)]/sum(p[-(nclass+1)]) return(p) } pkpd <- function(probin) { ## Count number of classes and constuct prob. matrix nclass <- k <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") probim <- matrix(0, nclass, nclass) probim[upper.tri(probim)] <- probin probim[lower.tri(probim)] <- 1 - probin probim[probim==0] <- 1e-300 R <- 1/probim diag(R) <- 0 p <- 1/(rowSums(R) - (k-2)) p <- p/sum(p) return(p) } vote<- function(probin) { nclass <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") votev <- rep(0,nclass) p <- 0 for(i in 1:(nclass-1)) { jj <- i+1 for(j in jj:nclass) { p <- p+1 votev[i][probin[i] >= 0.5] <- votev[i][probin[i] >= 0.5] + 1 votev[j][probin[j] < 0.5] <- votev[j][probin[j] < 0.5] + 1 } } p <- votev/sum(votev) return(p) } kernlab/MD50000644000175100001440000001554513562454636012246 0ustar hornikusers4fd01f0b6bc53f02af524716d55599b2 *DESCRIPTION 3114a4e24b015242a1b61147f7cffe25 *NAMESPACE 7db9a58cb6e5aeae749727781fe388f5 *R/aobjects.R 0750c9216dfd490ac36814b8b1ae24f2 *R/couplers.R f8e0ac1a792745090fa9a8da65847804 *R/csi.R 89d1c67ac3de8ff1e48de1a2dc79d477 *R/gausspr.R ab289bc31386f29fa9b2bc9a667504f4 *R/inchol.R bfa34b64d293a380c5c4d045105d4496 *R/ipop.R 5f574afe5df7904fb80bb214f01fcc6c *R/kcca.R 67aed700531a0ce066bb9300e7f0169c *R/kernelmatrix.R c2688c1b636fb4fb3cf51870ddaafee6 *R/kernels.R 4df2eb88a79a9ba527515d471042c5ef *R/kfa.R 894f285bbb8e123968cdfcf88c2363c4 *R/kha.R 87fb64fa9308b0337216933d6aa8cdd4 *R/kkmeans.R 78cd6c834753a4f6c9f2ce570df37aaa *R/kmmd.R 03fc2d9d2bc5e3d2719397c9e1bf137f *R/kpca.R b9d06cfc6866fbcef5d38ad8da944242 *R/kqr.R ff61b3c5df4768196226de43050f2f15 *R/ksvm.R 1df633ae0f402d126694715b89472a42 *R/lssvm.R 9a6305a7f6f48b3d5b9897aee24c7a88 *R/onlearn.R e011e88368b20e857e139cea577cc056 *R/ranking.R 1df11b3a35b28147563ca4a01286a739 *R/rvm.R 42578bea93efc1ad1488b72c8acec274 *R/sigest.R 159df23cf242faa6b7c1a0feb40bdf6d *R/specc.R 9d223ce30e55f376312b878c3c18e895 *build/vignette.rds a314ed48dca2f02ed29859ad64918192 *data/income.rda b1841131a43e1b43148d18b9df9e6982 *data/musk.rda 
052b704603c371c754db6fa74331bb25 *data/promotergene.rda 4a46500a709711ee45f393a13be8c98d *data/reuters.rda 888d288f540ebec5661f431d7cdb8155 *data/spam.rda f4869068ca79b09d0b5f821b02d42c15 *data/spirals.rda 8eb5dc3055a1211456543d81811124cb *data/ticdata.rda 3343578028d05274271ebf66d9383da7 *inst/CITATION 68fe0d0d842fbc1b217f45934a8edf7a *inst/COPYRIGHTS 0d1b1a09dbb52e3b0e58676170c3ce3d *inst/doc/kernlab.R c4c223d07206b59e2d43a585d07164b1 *inst/doc/kernlab.Rnw 9d0b0783e6a9607a774d3bb803b7265a *inst/doc/kernlab.pdf ca7923a78d389602d891a3cf6a5193d9 *man/as.kernelMatrix.Rd c0c282d5b6dd984608a1d5b9c92fe478 *man/couple.Rd e36dc0b16ba570c99ead7a48394dc66d *man/csi-class.Rd f87d54c4c4bf47f760cc6a779c7e525d *man/csi.Rd 704bfeedf89329461a20e4cb51a237f0 *man/dots.Rd 285c27b5d9a389dfd7e2f8e392de215c *man/gausspr-class.Rd fd9fe426e55ff79ffa5aabe84abd229c *man/gausspr.Rd b61d371ba2f8d8b137ec3c32a115c3ab *man/inchol-class.Rd f91fdd7d2e3c9aec28d31575d2ba0a6e *man/inchol.Rd 452553ee15225244a50b73aa08cca861 *man/income.Rd 9599ae27d6ebe41302c6236aa381b313 *man/inlearn.Rd bbcfe86bcb66e4b222b9ba13869fa2b0 *man/ipop-class.Rd c2e71c62027e5534eaf1f4c2dbcf0a6a *man/ipop.Rd 62c2b5318bb86222cb8d9cd361998d36 *man/kcca-class.Rd a5309043c8dccb762f1de6138b713b05 *man/kcca.Rd ef26a19723ffb7f6eb6dd3539905d6c4 *man/kernel-class.Rd 7357130456764a2b77cbf39d05d8dc98 *man/kernelMatrix.Rd 7a1e2bc5f883b6e7339bd717f0569eaf *man/kfa-class.Rd 22c7587c02310941aa5c484a3551ff70 *man/kfa.Rd 54afaeff97629d4a1353cdd98b5dde37 *man/kha-class.Rd a3d48c3acdc239d92a5762d283574d71 *man/kha.Rd d7cc0c1a8c977458f8262dbdd63b1b4a *man/kkmeans.Rd c3458139340043b2d63e9a642386582e *man/kmmd-class.Rd 6246385dba8697c83028cbece148c203 *man/kmmd.Rd b39a018897562f1cf907c7d0920186ce *man/kpca-class.Rd ba3a5bde31ea982871c7690edc588b23 *man/kpca.Rd 5a3b2344811fded04018d0b56d9bca23 *man/kqr-class.Rd 1ef59facd1ed13402b663beb16f6593a *man/kqr.Rd 3bdce4dc10887da4bacdac6830e66db8 *man/ksvm-class.Rd f98da25e651db60717100721a7a6f7cc *man/ksvm.Rd dd6a605572b276158f753cf3e3dce63e *man/lssvm-class.Rd bab982b9b6cdbdfa1d9c50cacd72408d *man/lssvm.Rd 95f670451348298d1c5daa00498f9f65 *man/musk.Rd 6d1c014b9f6bb8b59d032fd444bf5a04 *man/onlearn-class.Rd e14a6bd165c9595d1b014bd983d810b5 *man/onlearn.Rd 75f80214439e10c8d1b0104f5bcb44ba *man/plot.Rd f67747838e34ee3400ad4ffe299eba71 *man/prc-class.Rd fb4f0a2a30d3ec62e66a125f64d7f018 *man/predict.gausspr.Rd 69e21e71600ccf8a8df4a1adb84213fe *man/predict.kqr.Rd a92aae4f4aa90adbfc6d9f698426e55c *man/predict.ksvm.Rd 17510c748e43b26899603fff435572fb *man/promotergene.Rd f3a2c50017ea501680b53c9e221bf6b5 *man/ranking-class.Rd f478f729039a6352bc53c5b8bf5d8106 *man/ranking.Rd 8bee0b6c367f1c5f749b296ff48dcc23 *man/reuters.Rd 2b1f6b6093d9d0a915995b59caf1561d *man/rvm-class.Rd f406be43ad5c7a6d4e2b90c46e42d2a6 *man/rvm.Rd 86c5fd418857bae9a5c736e8c57a5c5e *man/sigest.Rd 38c1b0a597898ffd36fd635af5df2d32 *man/spam.Rd b176c7c0f1edb61818e9ecfde276f349 *man/specc-class.Rd 7c1efb159e6b590600d84151e848aca6 *man/specc.Rd c707c7af1229bdfca87272866bb3199a *man/spirals.Rd 149b3590c24913c3718c9f1d6c265b9a *man/stringdot.Rd 5a3d623ac56f129716429ba87481eaeb *man/ticdata.Rd fa4feb7dd29492877886e4d86d0cb8f4 *man/vm-class.Rd 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars.win 3b77d80677bb88fb39cab4a7d2351056 *src/brweight.cpp 048d635dbf0db99a0b707bf0a9c06984 *src/brweight.h 50cd06527f816675b128669d222bee56 *src/ctable.cpp cb1e056dfcc52d5319e71981f9c90611 *src/ctable.h 342cbb0568a2fa8f27b1f0c42542737e *src/cweight.cpp 
0ede046d861731d10f965e2ff8f50e4e *src/cweight.h
5c02223129df9d548c614acd0593645d *src/datatype.h
f085fe8cca3cb634567600216eb4aad2 *src/dbreakpt.c
b6cdcac3a097202b2b3687ba9ace8628 *src/dcauchy.c
455ccdeed46ccda0958453306fe9a951 *src/dgpnrm.c
c9ae627ea63dec6d72867c2026121648 *src/dgpstep.c
821081c5c42e2a20237abcced03a3a6f *src/dprecond.c
165209b9e9410785dcee940d35d53c05 *src/dprsrch.c
33b02078ecd469dfda0aeb1e5ba98cb2 *src/dspcg.c
e13d4f68dd0e3b613f40066c47387233 *src/dtron.c
f3c6c30f24ade3e5aa146d0f0a6b11f5 *src/dtrpcg.c
616fbd8165eddace388ffc7ffd90c753 *src/dtrqsol.c
beb2c099ff3dd87e3474a30a49a8437e *src/errorcode.h
a0f99b7568a3b1c4f0e47437b022e4dc *src/esa.cpp
ab96f4b2f43cc0306c88547ab6abe1ad *src/esa.h
5a7166f36e34cc037b9c2006f8bc00c9 *src/expdecayweight.cpp
7f04e95fcd76ee21dcea4d7138d96326 *src/expdecayweight.h
d16372bf79ce22a92dfcf3c0d0b769e7 *src/ilcpfactory.h
f103b80f529451ab71a425a31ed1eabf *src/inductionsort.cpp
fd4a5ad4b79ca119885410bb45c7d12f *src/inductionsort.h
a73c84f3f5fff2b34dfc76999c312068 *src/init.c
76adf49038c3585cf216cd033a9b4183 *src/introsort.h
0073f847ac8606d19e03cb0eeb27e0a2 *src/isafactory.h
94245de3f9b29eee07fd1f7d8d8929cd *src/iweightfactory.h
d2d7af10799002c2392f038e7d767c3f *src/kspectrumweight.cpp
b5d07bb286e3767cda7a371c50d0122e *src/kspectrumweight.h
b1a983bdf87a406584defc0fa332c455 *src/lcp.cpp
6de81523902a1d4dce2b38ce3d57ce98 *src/lcp.h
f47f3118ea197009f6f0e12edeb5fc17 *src/misc.c
d5d113bf04eb7759c8fd0f915dd24c64 *src/msufsort.cpp
82af93b02f090a83152b52239e0e3711 *src/msufsort.h
36b8004ade5fe1c5c2edb01cf74ce5cd *src/solvebqp.c
01a09c0f7f2fb72637644b3830b56c26 *src/stack.h
079a2f29ea98ab6f5ca4e814bb2917ba *src/stringk.c
801972af49fa57499fc3e519d202a8ad *src/stringkernel.cpp
1c19c2215be7a2b25f7439fc061f2daa *src/stringkernel.h
ae74f6ea199b5d5b9b4b045afac5fa40 *src/svm.cpp
670301bb88ff2b0f28ece190a96635c7 *src/svm.h
5f5910aab31dc2ebacb4b15caba8e873 *src/wkasailcp.cpp
fd6807b3526c7d5442f66a2660bd9e4c *src/wkasailcp.h
f48a5df5ecbf1ac1831e5582798eb57d *src/wmsufsort.cpp
2694af88ced7e4391e92120d0c90587c *src/wmsufsort.h
a324922cf3b84ae82f364be31135168f *vignettes/A.cls
73a78b19f46531a6e27592ceeea9c379 *vignettes/jss.bib
c4c223d07206b59e2d43a585d07164b1 *vignettes/kernlab.Rnw
kernlab/inst/0000755000175100001440000000000012643171236012670 5ustar hornikusers
kernlab/inst/doc/0000755000175100001440000000000013562451344013437 5ustar hornikusers
kernlab/inst/doc/kernlab.pdf0000644000175100001440000173701213562451344015563 0ustar hornikusers
[inst/doc/kernlab.pdf: binary PDF, the package's compiled vignette (source in vignettes/kernlab.Rnw); compressed stream data omitted]
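The MD5 file listed above is the package's checksum manifest: one MD5 hash per file, relative to the package root, with a leading "*" marking that the hash was computed in binary mode. As a minimal illustrative sketch (not part of the package), the manifest can be checked against an unpacked source tree in R; the path "kernlab" below is a hypothetical location of the extracted tarball, and tools::md5sum() ships with base R.

## Illustrative only: verify the MD5 manifest against an unpacked copy of
## the sources, assuming the tarball was extracted to "./kernlab".
manifest <- read.table("kernlab/MD5", header = FALSE,
                       col.names = c("md5", "file"),
                       stringsAsFactors = FALSE)
manifest$file <- sub("^\\*", "", manifest$file)  # drop the binary-mode marker
observed <- tools::md5sum(file.path("kernlab", manifest$file))
bad <- is.na(observed) | observed != manifest$md5  # missing or altered files
if (any(bad)) print(manifest$file[bad]) else message("all checksums match")

Base R also provides tools::checkMD5sums() for performing this comparison on an installed package; the sketch above just makes the same check explicit for a source tree.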
SoAΨ[rm3\u}"sn{k1!c)n}t4(/9&2g ,Iѕ@f\#h/+&t#䋜xMqւ31).] pX( zW'Aϛ!U8ađ"V ɞU2g;2[n Xb;h [ 5ã\Ռe;";R"nCR\uJ"74JT$HKgHTMg_*.Bͮd/&ocg!>:9]KCBUɜl8zuHqͳ3ްejrxM, gvٱQ<&;:^RX{=Ł2,iz$z]a2 7t볼%g&#TįGnN>)>O O剑qPǐ_[8 >:`ʳ4<*Ҳkzك㉚c?̪NLhA c=*2CH+bP poSm뽑BwtjrCʣ.חQD@o/s6p>"yj~V`'*m['q~װ{sAyy媤M.oߊ@׶%xcmCa!g#n*dX,ITUCͧ{`҂,pBo8~XgTKŒ|)LF;ʍ![6$P;yܘ‰WQUY)Oiz%R^7AtoeM<&8*Uj7 KODG1|CĹ(3ڤn̘`h;/i7ZQČAH'ƾS5k7MR thWz\GgK }+-㝂^h:&+LH;\nyL , ia8&~:ŕNʽrĎ}[esx5#yXPMLȈ#%S%"}]EgY4yP\*;iO1H74r{ھ%TeUU+߸Hw/q=VX{ETVGZBa8V.Yy޷y^7UVv/MxNpl&e #oʢXT+$ht9]MLDuW\VQN%E>RA`E.Ua5Fn[è_ZR7eqZzx Dc|4R$ :醋 w7/2m;o@6SZKpTe椸^>6j r_M|+gB$k },ɚgH>wvݴK_->Ha&̛6Mqo̓=zAR ȶ4?^!m>Y um[{] Fuw-0b3R*'ߒ"PEʫ( !ga%T dV9nj α=h>DP.`(mq\w䘼U[YڧGҌeN30UnT{#c7&LYdPNۨԓރ6|O` ڨLK܅4sP#akPqvH >IH3!E4n{"A4 hu!zcWK(DDb'(bO?Uஜ5r%KtV&:Ӧ+m\n70BLQV=hRO%n32cz\ nDxC{s.7o#.隥@[sKvv7'P24m@__'grԝn;ʳF6R@73y[OIҤF̺f}]GY 1:/Q IzwX LXMvhɢzS}lT8vRId0I[X>1b2 cZӼFnWG؁$ALI=C)⛝,Ngؘ8.lr tzwSwߺIU|6eU[qY0`kU8vmq]m{Vf9}ĖcOM@ĊXztMs戜 B#kd.m7<` `Лi&HX74"ub^7=ua]3ݚ6]aųXd n;]k\ʗC.5,0PIs_6ñsJe}o{^3%ăAZU%xbXG?޴!tW#5s)"LCtqh} Mp}xf4,)2"aZͤ:Au^l=yR;ㅠ/@i"4ubqmUY<تN~?L\쁓~]>CI& ǝ̗ : c(j3p8%RjQ b~'ĶJmiaK8w#)KjNTh)7_­\6j<՘ψ.c\riq1t5dw}+1@ d}asp;uN`̽ pA/rǼ[ $BmV́1nQuCVbE7pc~ԛ)fb :GdB'J"yw MvɊ=flE*;ȳЕ^LP{)G);Uo% 罄}=zM l-Jg4ruG-^oF|(lkd\tгYa ) !=ۘQ_hq +E݇1wi֑:s7Kp(Vyҥ!WnL TJޓCxjBKH@ #tݢLm+̞(Qc},ե Ox.S=/u? e$"IN&0F5PH'J-)cyq勀ds˒[Lb YD@F͌O9 rH5Li8ǁ4tJ)R_ tidg1jUsv@k ׯmu8yХNQ Re~i؝[JEį<뙔;P>; p;k @x k0sMo0p &ovOr ؊ϟ'r؋53F#*vyVCW QR4DFڞ\/hS?I#R^.YJ-X愖#,!.&J> ,s,h4eˉ̪.09fü 6Π4qabz}͈a*wEn5/:GߟrC6%3ZQR)K9cD5?$ڹ^?R[Tm6$N@LAS:JN+ȵuƼ5+4_rmH'm|Ntw}קʾ*a %cCP~}+rCay֍hޢBG2S(,D#׊`D  s~c*(ͪ.|FY2<݊^šsfwf^L%bfG:lQ| ǒoW_2\,?@*ԃە*w&$KMV)YޫNi:ms="@Ăqrc2O<=)l/}C;OS֟j [#8g2x+K0)_\'BU}"v"Im$N/i.T;2WUY*c௽8#)755#"JL)?8QM! 4'w>-_0+A]?S#>^"nJPq7^UJDcI0knbo?[K1BRT[ JX )L p M_wWt%Νҭc5).+' K5ְ0NWב6=&ojp,X.`)3HieH$<vAE;c*Ewqn u' |d.4+>蜗{*wٽx$fqϻVXx@wt#qȐK}j2)űo ZE %.w kѷF||.Q$Ts'bKQ&Ђpr5SZ%EHkˣQ );kmXZt2}ee"T=!:RFV֐p7jm=q1wT빮^WFׯ'Tk}nÌga8&I ʭ1N[,1Ȍnk_ߕS Q2bY*) *CJ(Y ?2uR-d!>@=)4ESbLIO\/5y%޼fJrZRJ( .`+ys! `G?I }y?' Cݒ."#9z}$n0W4Q o,4En8ƒ s'sܖ6;'B6DPg|R>_oTSmD?HjsX^ իj@ žג J(D C9 '1ݼF}>%T!:K̝gWFN:WςTˈ>biHr88sJsl;k+!32!2;اG\|0~t9XďEE"aJ)|W5:MNgf5g2g*6mSf}Rޛ*MHy\=FVk;ZJn'7vɐ-.-Q /J2^_;np!&jdr?WZֽ?[JY%0Q-?91Vn-kح=MXa:.VA9[s]?Z:c{ Yn};=E]<זO׋ 㡐TY$aFTupdy ½ʫ 'wZ}z\qaJ30""~+Kf?ƅeMQ~ڈ6_bsHB;8'Y|jLs9_Li3PyxX*nm/`\u;€SvH]h 9܇bZs%G<_F8li,U 6XYx w&ʧˆV9c xgX W&rRGʊeKިMf|os-jjDrp%rQ;V&Н9Vt-\, &?K|UZ E+$3o#%E|iRW$PE'^-Vr%+KJ76B?jwUye̢mx,сz>{ubx8nm(Hl]hpB;L>:X`Au ~B2ݢƧZѺ`Uj֍1>LeS;˜g,clOr,`rePm]}6):7ɟ,%TOmෑ+`ϥ{I2OGqcIՙlOY"~0x Iz$z‚2l.}{5ftG'צdzj/fe_}ц-n9S,z/ԇy!/w.H"yҝÆTLD=q.y(sY;Xwxe++N gNH6&VuZ(PU"9u7Xsu⫁+\h0nt=Cn/`2uK%y_Bwk<7F X!T "啱 Δ7 ȰX iJ?ِh"TYQ=8`~a`\'TK?Es ޟ V f\:&*_x 40PSrq;|ڮw[}+Ha{m5-w diH x?D%ĶMճ~&o@Gtņ)ﹷ]a^h@|N\gi>F;ݾ( UY2VAnJKQ K_LFt8D [1_3(9,bĬnC SRRqǡϸVy 6CEaV(#(DZ**g?-,דd\'f zjd:ӉF/v62i-כ\DBߞ%&X7Sc.;sg163UefNԙ' _ehQp@Rߠ{rC""wdDf-tR ;ÃjdjcBxlLT s<@+5 J"e'2Q~HU]$sc@vJq plLsfNJŜE3*YMZiN"D h=΋;Kٞú>a%{X=%k&64U E\4{`'|g$,z+{=rm h-tF㕰*]i xiz5\{a~H6B V5kUOYq-GUXq*8 D#aQ}e#ೱWs*\^˸,7 #=dz8U:D^&u%fQtBXmPVy*X86 J_#,y.8Sƽq"+&)2t4 )DZ=6ٳbE]Z`Pg/@ vpT2A!䇥cМdLTA6$܊T5~ptK=M`^xBjYv%R]='+n4;0}(BС{v}~ I$E ȈaÃ0wGdk+=-"ۍa,=1p] ;09|"Nڄ rJApE!4v%kYԌƵp@Ctq]78nMgEqf̉ /٤j;7+nkŒ=BSYw%R3-l!wEIj+p+ L &[7k3cm E*"$lX-2Ns-d* /)jv2wPӅBqSA,hPnqNkz1qy#th %ٴ)74^I[L`Kr+l505G~eI . 
fNG}cGl)Ei' c?Uy~j%^ڧB0H*G3q{m:w;gw7MN}|}ho E27"woU!ɔ2`9"AyF7y{-7t<]8m8YI"5̊iussuݾjzk (H:%՞P^E&p6jTTOdzY ʗQzpf4@?eVvqb}f g*CgZ./:z;c1A3/M6?\;H۔a[cc%A]3"&2{k~e~sV%P]5 i7&B`kp':W,5\]T+iT|gjmCݣ=,y!q9[1e;$7%UBz E`sxYkI;ᤴxD螻!nhO!jMaYŠAɟq_e Ir]wcM݅MQ_ztHHCē6*5]o;žl%/^p+Yا̈́,0!ĄoA3I0N [G_gJ:]20(= 6Zn}7;AIī.l>h`aH-&׋I8w)\<˾VLvټ67A@NѦ~Kx\" c$-Igߐ†!~ IM5.HB]Ŗ/G6!c}^)S&(V 1;YjdՀo)|rܫ|S@試GXWC1Ju!4$В\xҏ_a?Ut#N63@vDLLZ$$,йSQްΎI1L;䪉||Hզ~+6h8418` ­*2> ԏbKkp[_YܯW(@4.AG$.u7qQoqrs7Qj?,BG;AAM͟=>֗*Bw ͼqg&X+!%2lXeLҀ_Xi;B"suq>SV:£b3W0D^bKQjWDUStCCVO7^3$ ߴE5qʩ!^:Gl~wephy/9|WG"0ɤbWWh7ql$@{{8*p:脏nFKFrI3\t@}-GV #u.JA&c/Ӄ/'{%b2u(mUָbVOF&%Qb1e]\qm>|aM舻S$foǶ n=g}JW||<{fRn'h,SwHA:<3HHkkL5yQ /RVԝF@):󼢯a D}Hd-d@Hl$}`]{7Z+5v0`ȦpNJqݐB#HDMU,Su/fMJt2_m+,H ر%{.:]q4fhc8ϩ S^>} j5ׄ_v䓲jZlh3D}lLog֝e5Sx8L> stream xڍtT "% %=t J 1%4 HIIKIHtH19߹w{׬ͷ~v|lLv(G %Z b@~~!? ! c{EzpPDB!(O Bp P@( *) &*A`v@-Pz)"|0GͿ^@ 1@yW(f  (G+-hAQ(!BIy{{  A C9Pka6g3oGyCP =p;(n4PAP;]W~i"<`>*@^~epx ;E-_BH/і_D;o @p D/G ~ݧim4D"JM ljߞUznLm>Cy^qV儮 O/+s3^4jIk iaovo|]/:^C+!Θl℺g=*>ue Ñ3z[UwMřLNS1cxqHML0's  &O-U ztRRRc 9]Db{z˙F6"p S[~5i˪kR5Rzh9.\! ۢlӈ(yj&5ӄY=^/|2J"?%CX sV旉| SlT"QSE60F^ y-QG+re\BE&P]t6~Fvb !k(_gm+6)Mi)qgFm{m˙ݘ^ UBJs;EoEm^|(eެ^ԃ֗R.sA5e*+/&}`^73dэ1 p KsadDEe)nE+UIewy07pJ)Ba0HeB2U'8'DVC6o͸/ IjCʃGOb.VRo!_6ݭoTrDHe#W.9:>K ^p.jMLdgjGrs3n._+ٯ&^]+g&ɿSMft?kj"R :,+8&/Hvݽd ~YLAO[«u Jߪ7/U66\LIf nu#v=U[)Acrx"J3?.T뵻CGj*4D߰aVkw+|T9&GŬSùvJ9N& -Ǟ9'kɦ9ݩkc/ޔAqi?{<WK1ip+$H<&kVFSsb>1 @U{²͓$I֑-mGWcU C4z"wB򧹉ao-j;ǒy'rdQ c 3]QE^l@)b 烶 qR+Ӳ+IS17MtXg4>/˙o(9W$kYZ'kH0m y.xs%Ru΁ lCɴ)i:ßW򪀙Ԥ$Ղ6]JeS[MT7qY[Mbۧ ju FZ7-nu;-91y2~m =юS/+_X֋^%k4ڐ{:wXmX28qqF`DߤO*͙l(16x]LR·D"|c1mba?\Xgj.=\)?Qۭ)ѤκU}J.%.e}&"?wC&.kZd t+3o6ч\BeWNw9J?ݓ +;4I}OhyzR_9퇥uQTv>Rt5l |~bPpkg7Ak,LR&:~U3 fOOʲnT?~ 16IH] Ȟ{G`y`DEz=qEmLFRp\mǢKkg+5u _a FUM<+aVwAT*񂘪̼h-pVkuVEEUݘZRkJ^| \`7K/وh+5}%cÕX_6WIBy678οs4i?o Kx$o"sD@ :J ֒iEXW˕ivgp3wp*gr:S,ȸ!!+7YrS?N(ԏoYJV,;xj<ٺJHzn;ԍ,]%#U;[V[O 1ιvXbXO2pg64 S9cm_tiY([)bݯ3Hq v~-XhtE!uA_o%<.5dNX+ s |W^M0:ypTEHY6QsoAj-fI̲؏4{k;zfO\Ƣ# U4 uDb%?&  6.o)g㈶h)nÜ3TvSP|al\R /峭*mEЍ;i i%y(Ho_WT<Y.~|özӷٛ\\Pќ.<>6c

8D)]NZ1&b~}QMs]QYsg^.K8B`= %3/eTynX;zRic4o< Z;i%lf.šoγ.{{FV iJ_(S m-y{K;Ead|Qn+W+9Dڠ쬊=.>Em [^9Uw$|@*W؏wcSRߪ mz Ap4zZa ?+9ɢRL;2+ܔ,cde/}̐k/b?I[BOfq5%j0JfەױE#}e_UodYU 9,v5M+dxWgT^ʱ7'}~9Z)‰՛:ZQ=`>ZBIS/lE@1o*g->(Gwd)/UL6I n"3wȰG{N=;惋:Ҿ̐x5j r;R+AeAVca\ֱ =ݗ@G'7{ɉ1lJeq7SI myW~ xm|vi&!ZMC" x*a2ܪ,6t|C83NHR]P9ͣ' C7w<7zT{&eLD\fݍ4RQ`뻴*{~ˡu"%+ Cngf.L.4qYMc)OoʬǁG~!:i 4Em*q8EҮ#[lMT/|?X0)LlkLpJ|KA`#eHdf4e _k]69\{ h쮠7k3=.' DkNp}| HOU}>K%ӡtḅaUk8Y8z|l!͞*ՙ;H'hĠڂ횑8419,IG knwʏe${8fk(e%Cf@SA TH78Rf7%26G*)Iqx{эg5JObK{~58Y߉3(YhxhlJrσ5|$^:M`Rt}VtmF؍{|M`?`aOK<F=p"vǸqjMz\{3}CU{_5S5mK~jXɎCv9;EHds|{۟9:U/7׍:_.q{OumH~dy=4H:c.QwN9B0t[7dk>Я:.<)UKs~Q[~{[ }=:mf(딏?z[>CV\O,∴k7\%t1ዉ5 )x9LT~kQv>2RHF!G(zWñ+DIi{}1(m4 "pulL@'ƙ_o;%Hފ ϣOL@#Vq1{=$(udlg&ͭ$p am )}Iᔛr+掽m"Ed+^&#>oqTM1V;:KϽ'w`> stream xڍw4]6тDFeFE] 3 w;E'z zD I>}^ksvgƬ#AZAW>^8@ASH  @ll0W8/1(D vEh;M$ DA ?$/C$JvA5$BtBl\k pv9BQ0k0 v:wzHk!8$\]ā@^ /e+ t.P;U0@ S/@Gq"\n@oS|` Bs{e{m|J54To<#$ Ą"?haWaݦe8N?ci!Ѭ8&Hd?Sῢo$jG v2@=H ghpT]1CD2 цZPm ץK,k&o=7R a0~!a{>|I@=SE ].tq~$y rDV#1߈ODDq( Bo6K}7@PkDgrt<_F&پr̡.^rVe<[BȽ|Mq,vs^xKN_Z fǨ?l1nt}Ռ١Ɩ&JG~ѫYW:?6EgJXt' ,x-*s>+dS'dn^p ,??^,we5a:&gFѧ(N-,ȆbQ,X1'Y{oOJuV}eӐԊO{OӾ;xOH"PռƁD{V L_nM, Z0U}>Gډ*J{WEP"rt8Ho5:!ֹ{DC%ڗ3Fzu9$3 goM:iEMΚvkM8@uРV45rS@k|U/S*:;# zr=/Ea,a&QP[ Q-z{'?_짷OvRe20){`ȶVRqU,G&?|ׅ2;΅(UG$'Ho ۷4Iy_SJi}WRE"}&:g{ѠQ(m߁Ҿl-~ҙ [#CqEǹ5,ɉ&簃#qA"iۤP5H"Ԓq*H{ lJ|,@H;QrѨ:pe=` ʐO鑔Ҹr[]`#F-vR-uNj%cOlZFYIore utl7aE݅܊4(rT'*OyF\MǍh{bUt*'SP$Ҫ âF sC-KEDQld>ߚq Ȕd=QӥoqQL^ E8uEpoઽYZD [F5:%3N LwB>lN/^5Y@>?(-+u ˜!ieaQ>(nadb&n~X\!Y66O,%Wχ^drz <_86X?ר뙨aB"?]-;}LD;Z41s93EZ2&(|M=MwkpBBum .ّc)F=U|!xsy2;oBr/Ec2R(]x0 ET#h"J, Fլ=wM`\'DsAƮ8 ^:r!J,<`b_ta Ӧ!S'Bn% ^3Z: 1kyrvIOVKtᶘC'ƷbӨ)5moalW'͎g[%I^ߍ"XYS];*lӁlMc{ yZte>>KRzJ%6Ke!1׋5C@noG%d cM)ڪzOxGmG^KS9E׸ne1#(>)M1aFclkqtGj>S?ۜc9}dJw8Čթslc8m2G,=ih<,k‰PMB5QFC=y qn=X lM&=am;pbō'Rzǥc8`p+e"a/SV}7Y'AlOgQf3}WN,gӥډOwlݡ{^w΄3w 5,#ɡ|docjT@ jwֶٌ_-{ЈQ {E͇@/,?^lUme7-ݳ.i<.R_R)*y3sK#&<4Cԩh=(ҌQ~w|n)6+ZeqgGvKN֫oK_}V=US;3ͯO9V x.Q< L\?!BuwJu9x"/8ڷҥG6 awpاzuq75x#>ty܍Է鬀1ZT.s{ƏuDd"9>8$NŨb4.f[c$f^|>ը>om햴C #.FJLod)p53áB xM9EqD'WO+*HɅ&U5aoW7UvX}I@YߊOVdG=U͵T1џ>58'՗mA_1j'+*%HTnGWcQitdx3g<|[za4Ve. &-ci%z[K0dqu7P(?_QQىq;-c!6ƥk%vt|PK-3~*oLHYvȏ.:e _^{NM' tDj|X.o庥p>6TMJ>=`h3]Bd 3l. _32<8,Nܽ"w1O%?],Ȼ%w8oIXaBzI$pdjMwOɛJ %}{Yz4(M0(&=Ϫ>:GXu'dzYeD]I~5qxFP,LS;k)<,E M4~QbC5YHHM+4Ť@{ޗy(E͍> ǯ SviQneU!EzCBS7. 8q$.H'l#[UV.(;tDpn,U*휟GI$JZ#>0kSCD9-{FG-g3o&z:GG dֺP?91p!eK|73x8TwٻSghtmF ri uVK'psw`p475U^/KuG1{D*cw-Bɤȳ_J#e}ɽ+9v6b3' &'OHEשUS=Qm~0 .qܺlk7"Q +$<0{22CM|fzԪ,KdT.BZa61ŶkŋȻvϘԩ*B*}sFWlJbZ01)'stil bxLQe 2⬰8LTEZ4If]Lx= '^1D,/>q*׈=ۇ> L1((|^<8uyZŴp+~Żi|':t"sl\>ZCX994 K9#H{q|[L9(*qgR8ÃNB<c=7]*Cv2+j2c=qs,uGv7ojpY8My/TXŀcq̈́OLGZyyl [Lr9 dJ{8%df4mqB>fUAbX]:_+OS6\8|o"+I]z57n-ca^[fcG(-pJ醥WGWECrҥ \ eGrHrp.^.4I 75/`9]0) .D,kU~9Ei19 nz}:/#~6nLnu`szMT~N(|ǮԘlOai_CYZApMƑ84@4D&z|,x40X3"ɾШ~^Py3] PhBuz}3!ٮ (}DBOG%5t-,^ Pft,C!11]+'2Wl4y!yƓl4p~-e){F+}=ۼՂo/:=:x1=5cV^s(T\Q3GKԳD;_(vH&s(QӓUQē(N>MS e08+g~Yb1.wDZa/PIFԁٶx)dM/o䲕P␁ ;;N:JP?c@3J$ eM5U/ʤI[% kL0ev8fƏ='ER2-j~V &qa.E`J+ɉmgisd\eA!o8Z,|f4rIgMEMx;l3 8QpMNNC* ۉ/\ HZ2ʜY!fM?4hK`ZqG]ʻ#K^F=['L#] W+7(}1s9E͓Wdw@.ysazNԤ:wI57\]vS19` Cr6K\~;O\Q!*|᷼U=;W3@Vm<`ȅL0$@a@V^t4u*/b<}y PĨy$=)wkz;cU""IX\3Zizi.~xNY6-,avFnGMdIqShÂ:٨Ht`خ?K{8fOc@@v&"Ɠ]tDӔR Rm\!_MOӍf쥍v^yF?,duФ+xnO56>(0=#k҅cb=J*[8% endstream endobj 568 0 obj << /Length1 1676 /Length2 7522 /Length3 0 /Length 8587 /Filter /FlateDecode >> stream xڍtTj6)tH- ұtHwwK²Ē"H " Ғ҂ ?zgwgyaa疵Y`P7O h 㲰@,F`;C!l Qy88PD'(n7_4~!a!q|w&[^( q+`ꩀ qmA;L?>-mG,?O_ a6[3.m8M.7w{D"ވȿ`K{Tz63S0Pǚ*Y/a,kܾ3 B_')D+l2 ׾ۍX^6]]=}7ք;=J3R-@f( M%CP'룲OSkxW%qAE,֙TLt$'O>d2=߉(5[?Y(3wo~HmFE~Li쑯F x;[ei kV! 
m|9O\\ʎ/y {_Y"(W6q:-z# S_f2~@=E!_s9̻ ^ tgR?ʵ[Q x^v|DQCQ%Tt2cG{§%H׫E3gb 8ʍe Nߥ[gg,bg*cT,=] P >LV<; <P3igٷމr`mU|58NdB@rEAc^ˏMp)\ޛ,&-*nEI!}C5X6e Âu M]K`fbЩৈ|o]t2 j5qqJJh>AR3(L3ZpH;p:ql&l\h9\Gٙ$SqK>zc RJU}NUV!=cV{; da-KH=Xo/v5ړ{gA]vmշ9k/R't-6Ň%-'w^j# _ĔCvd\D_S;یH&df~NFZqHJށu_G aq ~x&)Yȸ ?pvI2GomS\l2:ot)U/#ae2@W-aCz U\vn[zjGH,ĘD h1UZ{_vT&`~j`jaRmxn.<|?-{77☽I*ۓ$M_- [h}6}sDSm6 z1 ֮)={r3O>j /ߘ~99j KrFRԭ838ǙxȲgVŔ ^aN?~F%ådGz7sZpa!ToV7n()>Zfm/ k/$>{zrt2hMu zϬXw vjIjŪ%@D3v԰zH2FDi"rNsDCڔ6l7 w !0ci4ӦG_R5vKO^} ȕY\M%$e>,B/*q'r.9X* ac &W~|٥{O NYmw}T;Cd웦:cPE>&Em)ڢC* )t Z1bkx2__?){ŝװ_o$OEf]:M*a-$ם(+<..vA30HƄZ jIB: 8n4|G}s{8kْ[a'gkģV2U-{JKXB8QPпQ3WT6,ǷϪ6'dK ^NÊey̖`&9+FFOZZ;(KM5;wsHc?nsX<_$cz9θnNLD%1Hs '=˜N $p->)qT 5VR{_C6avT0d*Bޗ} P6&E닥(R北ʢo̮+j,C y!]8B}PH%h6^Y[0BW5fU~(eF\xzA.VXfƿʷ}Opb.@3?%YS*[g#*L\q 1G*,^bqlIV e奺`$ݞldAgȤ|5 h1*iHX,,r92įҊ~`D"y5= gc]^AB&A X~`E2w~g0ߧ1,a+gnR@@g^x^DCe?`#,;cU8ZC^ޏ5rx (?Qc^Kf67^P|h|UT?g2q{{gy0'%"fR1v"\,qZo%ğ7)x~ٺ NZ4s@(V@BiǚFZ`%l&̞.#8GY[#+d l'ntLe61>zG~-swb;v=`4emug8yOǓ&wo;-2*ns!ULo %~}^uBm ?jO"&B ifMײl. ίKJ1,'kЭ`)vvyy981g+ixd64ӂR+⃝ϔјiοL|'*%ii*B:~ {`4֝ >.9G]o }ѤSmG{mU tS7}0 PA,;4}HiECI3 }E:{'0;ub07JD""CLya/8Am'˹oZ6)< dl9<Pr)ڮEO1j=O:x4f-ٔѸ،LۯegY $Vf!KzV);*˙_XzV%AN ˋDĎ~ˍ"&-{n$Qjw7}ϙbia`-<8D?ZeP I=f7s&;jrFֽHn,࢛[mG|9l=ʾgʿQo%>d~F.xw{g#h cuDdC ')<6ΦQTq9rlH^O"5;7=Y'펥q.ƆkC0A,qG̜LRzz&,mYy Q\**" r "MlGZ ab){,bǬ Cz9BG!Js\1ki'J#JA/ڹu Gk|Ƅܳy )B˪/.s96ˍ2N0-VZT,tpkWL@z⢢SD|!]^X8<1bQ~ ;SF)pz.32x#j @ #\!b5AyG8t`ڴsvĩjH@1I7Oinf+o6ƚT-U%==od_rK^ے4eYrmA 1[>RK9`DkXwg<@i ɺDdo-LwM5#X}oLUy{iKoozsoĜ_4U\D5OtL&ߢؙOy ?RR>k7":ب7Fʼn*D/ z3 p/=ګˈYX4;=όhB&6s"",)@Z  D亊wՍx֝h%CB-hw vV5G6Ytf0Q(}K2c 0Ku 4, p)琪eDmLv$BtӻW* >pL(t:AQ^.^XciT<,ݶ:|.l,J6uc-(X,kԳ0CVtM8L򞵐UOfK}ӽҰ؛Q%=}kqR9SZ +(ck(Ty^&yAzg}b T4/WJnO@Dc^d*amCFE$QnqE'Jq(ld 0Lα8 ĻAY@ٴF7BHh,sps/JQ`ͱ OA:ii.jtצ^V 4%nqھLyշ$aVWKxFW*fusm*|XG9 CMԺ}POhGT[K]L1p$zg5F D=roz۾ހ&o;u6>reҥNY{HK&Q]-HPV0gpTUSI2oq H38 _hj߷~*dnIYQǚmye{Ow7s*GڴI4n,NL:6:g֐i`Oh(3Dt#VȤKb/,;{˒0[4k{>A}/_BţqNX6xP΄`]TS,,F))Nu"wo)j8җZo0 FF=HCg l3#ҌprYmV;4 ]d)}mC)'惁𾾋d4zvAjpOOc=죇ʿ叹G8bf)2{;Q S|:R;xNЁޜڷ2d:"x?9 wKA̳v>")ZD#ުaB,6f;.:Ҥg _%[kL}FMIEQ~1(o冚 xU/O'e/Da 򓉡-^mNTHUG~:24sJzBoQ"uuE DYd~K_ʷz~\N (t YN&54'?+<7.9XY+ZNcho?[5DYfcH#p۱i>K3aBҶ;?-`R\-L؛#$H{p[,D ԴxSavZ+fX0^R^SQ~8|3>Uҥ sHeT}G+  OWmN909zةzil@Zhp eمpːmPQ/kLh, gNh szk$ W.lR$rf˱0ݬ. 'i t hv^]sqd4uYÓfL9UvY.T/xp=_6Ls{ͅK4aWT[ endstream endobj 570 0 obj << /Length1 2230 /Length2 17658 /Length3 0 /Length 18999 /Filter /FlateDecode >> stream xڌP ^8%hqwwwwŵ)Zҽ}3Nf߳YZorbE:!S1PdLD bb02322#Z:G@ttqBh.5r~7XLL܌fFF19rD\,Mri \dhin&T&..l&Fv9#g {F# BPZ8;s30:уheh C od 5zrӿ* 3gW#G ]`ciszwbg tgHv6-?037 `fi(;9L8\,m U@\H `?L-흝,mpdbv" [[ŸD-&}wgZہ\<YڙaŞA PJ?6"2s3 tL,$PuKGd0{4 x:Ύ_ޞTo04q-F >~L?>O)ARRDXQ?=_OῈ=_A/b0EQ4(ZE\QFE`dcoW=1&ElS?<wh~ ߋ/fO&z&,{l!{E6FƦ37DuyfF}/оWh7{-lL-`{ 4$toM卵,nw{矝on(OIz 4T&@7 <Ȅ'Ȫ>w+$/EoMf"}8ie ~&q}nya9 מ DwtDDqow೉$(`)ŸK #Q{LTQ>-&R`sHwwԖebpn+;|ݶ ܉ O z@HpA^E'v ػ$ qF&eQ_-@~-u9@4<1ܠJ]/e{uGLaadt@f)WaJ2ğxޗn)}6thlT)Ar |UM.f!kSvh lW(g5u8,:TKV4XX>XU!34 kyz\wu S,;DEGyl6ze?^014Exnyոmقztزm.DD9A8$_Wvn?UAęU>cM :X\O<^N 2yԧه !h~*I 1(4ϷQc{v`sm[~! bMB!Si8(!6Ch-7j  :Hˀ dR@hq0` J b9;H[I!*(”5JI[QKd䎳:&+@2dn&㮶, #/.Rc_^W  FzM0i*-оtfy|VC;nBh`M5WJ|%} 8UPzpJ̆ˏXޟ7m c,6-Ε]3fB?e. 
zqܟZB5 I86{jC8Tg4ݱy;Q{bղۃ7=gjElA{U+ئX%@S~c#KS)M&8{$Kz6!Bӧ[$)2&G7#Ϫ)"j֑G"%R{ tIzl6%fmna^-)Oo&Rr$ÝM*OBL,X.'ӿXSJ,y,jb4ɝR7GyWs`撰u4!2 ˙e k&E33^Y ' ڜ&knKjEBwbxӑedcS6yX*);4 !Yg[#ޱ~a}HhX}44U,9UF$bo|8!O xD!CPP"0qnpS 6mb,}|5CB5E -tC"݆v)f8q+,~Yzzb́ٸCjw;PGUIaW~۳SjV(hKP&_M$1$B34%~C#OmAfa3ջ.@Rq8:Y٪<խ^3 R׽[=G0@ 0QpYZzꂙ.: yǯjDfJn]:m(O9'}TJwqMJlw8Ʋ l8|J Xla jXMC"*rwss܇!b^"=<$!nh};ZM1E0UJ"knO\Uv8ĸ/I1GMdo6`z,q ~;)'P`Cp ؑo-B-Vp)N+nE XCn9e7 V~H GwTOAbVОLLなu/l2^~;^ΰEV¦G|K,=>:]Ms,/.yvjHrwZt·iG_=~";35~3؏m:"4N5!8(V3a; ɊNt E4 ȗTFe 3R/gXFW*rзHW ZL0ߣ3e/ֱxsXQ!"U0@'3=Q!&+=5wRͺ[[`u7OնkEBzgzhɔ𶳢:_{}./a4W^8md prX(rIOnw n5NԜBrڏ|7elbQdc&gc*BNa֑mb;\i ]AD#T=egSxԽjyg_&`ux <ֳtǬ䥥eH!r)CF)W'EW$I)#e.|jÍwnX]+%Q4X/8Ӈp̉mEb9M\af8|d[Ng&16 N3BԏNBfF'& /I@s^K O6ov_D a(B984wB"x V|gc]j,hDs=Σe#+hӷCEu@JNd{1.),1tܾ>>ۣR!ˑ$M*e8~7x)e2RvQI%a&6,b bn(a wWq^ܟ>}^W-h7 /%D5BCO6U67F>N"2kWbIIroUn$ua0i]DyR.2_Γ]&TmIBMkP%;rbe#m&3dw1i=ɲ!9+ a''߇L(__NTG{zp H#9)ؘ@OHβSua^?f>Qm8COvOQȅ(s/n\Xb?BZ3"=宏Bf[-g#].JxyS X `O[4XK##/AN:zlU{a!A}w3`]b-85GJ78L.N ~m +slN)1u{ӼCn\ %~`zj{e>AY%aY8PM$-%TB=PK( a$^L!}'k||.c(/ oQ<=հti.5OJV?s|{ӼjMdQnUr=| eB8fJh GmDtQSrTrƑMov$$LT?j!RқGP@ifGOk2Ȉ>hrQ8ī V54tODM+xJWַb .^l0DM| X,-+F/ظF7b(T(*5.FwoYuVP0 !dWp#sKp;@[7j̜SFtQx9>f=As*< zPZe||%Hߑ1(j5  2{"͙_dNM`jwV٘|ivaȇ* ^!|Hӕmdl:Kƀ.]%غi7QDja?zDNG_L=?]v:fqx%EPI~>WyP3=k"Nr~izPF (MP`BY\fܸṈ}+X iUQ[L ,{|pO3YgܰQ#bMɸG̓RmiT{B_'3B%S-M,rpl(V6ָ۾*GrESAbdʹ% Ȧ> |7E~g`p^ނGb:%B_q|81Sx@M'ګnp.6vIM1KeK[C57[0(&*$:(ǢF<VnJ ]gV!); 0^+g_fsM/ږ5d(DK iq-ryO,`pQp/2:-l)vKVxшvgPZ2Wy ӚED2o إ|W3H;*)n e-aOEA bw{;'*19τf: $AZ+ %cy؏g̼c0χHl6 ѣ6EsњNѪLdI[Wn2/zҮcVXq8\§ 'pP( b4HY;$v#娔U+ 46(T$AYsm u\Etdf̻^𵀌Fc!^%-b*`ƪ;9^&DVMQ`dtLm4 ֱ;AGb견< 1d!a$YNzV7 [BS_3` d<7[8ơZ VC1vv~?b7DT1|!:N%IxishK=*]lKia^_3"7J ;aȼwsIdVO%%#R?OwW=~Q18\%6`mϠ޷4rch_Թ,~^j ) Wg]clZ- 7D&vqŽo11e&\\4`v1koG)d^FDcItw[N\Ѻ޺{q ?KP +c/!8[Y [|8V"k̵Mq KYn|sϡcT[vՙGlT 87 iM&uQYJC}6Ѕa/4Zy'stJ5 |2'޼ȹ`U XsP_u`eig!/UcdgCny| 8-S">SN A%!{XiFm|B!oƦ-zQoO6Z^{yVQO۴7/3FLB`TEtRp$_̉ Da/9T&YvaBV}+ f;P ڏ @Ŧ=w/aN- ճPu6iqU\!誎dE$vHO d]_ Y<e&ST♆|8!̞ɚm~vJs`/]D։L_b*g.: `mr]s/2]SBCiJB,z m`"xTG^8VV `c =. 
pM ii@~!WwLV<1ß&4iU$HDk""CtoHg5txtZB@ӷ3#j(ހ,U4ւ>!qGYUӻ"ABIX߹eQla~'xf#8e|Us5[1_?ڙ4gZ(*_I G#̾Q{DO`>>`n;3z.pg4Ul,py ;ZC`A V+^lLR%yjSS<`KLaţ2W)Yjr*˳+m;C\&/yJ,&6ZpGnZg?mVK"c_Pp׆Wv9K >Zp`~N 8fΛO{YeM\_"V'lsۺs%y a0VF8Fh>]M ( T-wEf \u$SZh d ?k琲R,P)ͅ ylS l$tYyJӆKM9){O0,obA)$Z8vµ6uwY=2 JwבO&!ҸRzF L,7(qCC⨎Qc> ׽)!98}|h}?9NQ_#; v zɾq!J ?,PM'K<6;c<[ ֏JB5JV8K`Iڃe{Cs;sM_ʇ&!¬G֒FY1.;f:u`-8 90DQV)usLna8xy__onY1}a~bR9`'4n`} +lݓ=L+U^ \x@5w L4:H1$N<ဲ /ZoY<>ځ%k*Q>8>TPDUhB@X+GbW,)pofY=%.?<+z9zZyQ.b5qHrr |O;{K1g#6؁:9Na3ELA]laz_X2ߺAe <uvMꑦ֯Ph/7b$ʓe7ӭ )u&g%M0jBUU~\ԕhDx7tp+ɭY9#ѹ쵒Xf!q?_1dqc-T{u} wkpeZw/b;0@=GLxTbx#QZy(WAm^9hp9X>3 OMeԔ?x3mL]UĔ B Efٟk3hJ6/ eE# j?R,-9<4Ƚ{Hw -i5N2-i/@]вb,d˱8aDx72Q ٳ;{X<59ˮ,[reA f(Q 3SND2(nBY8wtS^t2 e3l65)`jCẻ*7Wwy(pyvqƏ^G*It\?k?a2IUQ֤A ]/z C7V#W "0GCEð4dr,H29h:h |w(xZr-X 6g\'iK[ ]|ɒFsw7z>euq܌0ˉi97?yP&YO4J2" oO ,0{bG͹z(rP͜˱Suk'g3/q'7)r [7d(Lb ;#iݎ;2r3Tޱ}󜚟1@̄(6xkAGI;Qr}))Yr^ :b-o?T}竖~wĿYF"h >E6<-'!e\RA꿏'" PYb@8x}o/oFUu7l'=UGP$R 7l&d9^,K|RIHyɘۚQQ Cp#|b?N3-nW+R%)S=享x(8̺DP\Dn#\OX*O#_._O9<0 V ۗ\zcf?Ⱥ9jZNq@*,ƨ^+~Dbr]Qz뿦PMܶnOţgĮ3݁}E^HB5ґ1И>+eݴi@hrXkd;3b.3dvOZ4_f}5C3ً?4ڤ_[?MsȂAِ"׆ 9܆|<֟k- GhB}BMi=R?θί ݶ0>]vKoNI,r-Fmm֠aA%Nk*T!+q0Qk@1"4Tm[E!{v*yjQu}Z>rtXYhCŷi>qfFQ5k\Kt-L9{\FZR0.u z@ϕxCKly:rK 52bWU|f5S"SiS4I~ DUb^3-lmu:/ Mim݃P򉖵ه!@:]K-|Վuw!nQͅ.Lm;J3pQԏk (D 8I"0*h-?!2 _M]ֵZ/Z/1E0?f=DVưN!uBAxZԍnH'gWlpo=]v4w7wft=?fU|Ӱ|܅d;k!Q JH7R66gP5ؼDXZDK0:zYA|3n.ϗUq jI+O%Bf8XR!|=Q:~r)U#FRJ!";5SFfoXr0K;BfÿT8%P4dSʉV1Y֪գSOz,Gڎ--!߰tPP+&z4VNH7~S~ As8C@<ju{V7͏AG6._1Av f rY#riu]5+g Ϯ&zznoaq5q?%Aǻ UEPsa0G)$yH]ՙO 4G^3V|by%Ȕ /'gբ&Kp.AsCw.D2{ZCZvSsěg[!$gU #UN սOWտh輆>Xa.xjnWDz%zȘ8#MN p?*jxo58c:ۚ'5RjG-L !S,_a:ѳˎZlQ{W)"ژzs==*{Uw"=Gʽ?xe1j {IF5E+ђg LngJِ"{22h`t&>4-Q#](nr!):ijQx~´([e^'F36 G2RI}[3ƪ{jqM6}Y PFQ|28"j,^G"jwØЩ=Q.Ԡ }Kv B! gr=[1Iؓ8'<u2=xxQ>%RKVb67#Xm| X6qrZm/Pi2s e{݃MNyu~֍`H0a(al6zK X]/FM5ZW~n6cAH%~{lnXby2R#fm} AD@z#~ֽ_A;V9ޙht$ 2[ט"jJ@~JLwxy@ʣD\gTJp&ibo Yvd29a~|U1N(+BgW>[V;PAL²:QQإ/to?f$-qj#EUWpBFklzxͱa&ǝdQyگ-OEIf,ݞs66Klڴc5VY6ވzrvO֡ul݃"wg({,ճ\GpC V(I +o YdYÞj%jUQ5 YwQP"#fc6} EKڋH7"fXyNPlj0M}J͢@=bkMkK;_.] ^ !1| ŏu9j'mHywq2Er;54ў~ k[OFL:TU9cYHG-Jg)Yb~edDz:9f'gA 0_y%[`!jzv[}:ivmWT2T|GVfؘD( &?x7M\c,bhk#?Zc:lty쳅%ɧ0y&ɂ2 !_LbF+ށfrr~ g>;L_3۷i;imӻhf^V"8S&k_w4Uugj#ddl~ ] 0U*VȟW SZ𧘈:eR9zqbܽ=V u~v̗l8 'u`Lj񬳃+A-~ 8ϗ'6yBoIśVO.5%o qϘ~Bv_ vQ)z.ϯyŸH/I|(6S2DOOus_>nDG\wo.7u)BÂ(Ex|n`ne)|Z;4Q1U3Gw>EPFw[4=_I!':ATVպ7z(ry Z^W[oey_%Um==]WoR~e38SLklS8-pnq2*D#OuİŲdn|ʑ$o؍_mh%nHV4?)V)LS k^_sѤpG!.ËIٷu 4/HgQ C'MNq/`/ٸ?[V 5>P[;S!Ҹ4lwYC2-t@"U“&׉aifήdb}m$M;H:d~јC:]J9:-ϊ;yq::_ uIf},*c~.T QWCK5cJ<03zC? +ӯӭMf}fC>o/5G b$7&&y:PpA!jՆÙԫ4I-E擊U}x ->RPL.CN }Q-DG<ׇ1\֒OJ}LnКu >EMjs(5˟\V:FRFܡyҡ t 0ɀv+WI ӟw2Tжy` TsI%0 LVAqas&DiNUw9_oeI.*JjŊ|S>*9ѯO_? Ĺ\nÍU k$Gv^6sTN?"m?&B(\zi;Y .bZz0(|Hj#?^ ȓAu3xW߫=ʯN8jjUW0M\Upwg/2LMO]{]}ZM'-wf|O@x9->01fQ|,x1ؙ+o^0^{Do|/^'ᖀd^2mNse;)kyL|{[hό'HN$b\.)7Dqo5ma1e k  _ 'S(%;ޢT% R`;Q2m w).`)౪Sx=FK}^ODͥ5 òFGQ^VIw 7?4l;==2m?ﵞ8.u7V]#\&?0anijH=|}t~ c2KG+4Xj;')|,s(~Zm %o~;!pIA|f5/|ݭsEͲ}ēKƊ']EIfiH ֛6-S|<+#XԜl!]~:Bm:3r;,WbN6A/29_4h@M. 
a!^HK9Ͳqt22]d?k;ʔa+`_*2 J̖O.iLWٯ>(%d>Cl2Vn {t Vy$;}\jqȀ*jS&/ǏB=k]%'{~p TbIj?\}>0ش?0|.+2Efa9T Qr0>y/5DbD]!r2?)k\ȉ#I'(tuu> stream xڍvP\[- .ad`4HKp \Np~{꽚3W{]ZCM j:8فBiUUE^ra@`ܘtz`W7I] 'qP''/@B P:0餡^[5Y-`W% قo90_%Ela0g!OOOv;F n`W5#dt[6 r% dv_VT;UX re*q :: N6k.{ c~AnPx>qY;$5 f qA~ |e`'d `K{q9Y{'_5VNw_   s `-ǯ:^A_n~>Pg5| qy0WwXA,a p7 ?|WK1=N)^VP'ϗCPSKKǤ/>6.^ <~U4@d pt ߦtԠpւ ?8;_U_$~0NZw\P #ZU񿣊0\N6p2qy!nr`+ euIրA~-, bp}Y78/@np~/ ӿuZ/ „=pj~v'( b:f~>/o .-N /p8p³a\\ztwuZc(`Kp]upU$'ƨQh:GvZrݜrfvjR.W˳?|Pz\QmPY<\ N%\5 'KQ  hPDѦeK' ǯҢC 7-XE C7ؠh4ek վEnJCa2Ď\bHD }=p 诉R4uй@_~~ERz[ &ImBTBFYpB&S2Dejfp-6lyãǣɡ+6G9j],7T'\h}(dßieHm1bZ,[Ⱦ#ZӋ"Qk"# 'K0;?stk*zM(&kNŅ?]>]j7NћW:띬՝$׹ZS @]hq<8Jް ]1& /ܙ|_!R@5P`Z`Y7\R:I jly/4ŗ){#Si%"܉hL)V'hdiwRi_[MV^qN.4ڼd2܈oJ1~G[QZ(Xs+ \|~"=3V~_Ui(>$[±|b `L9 6!_ɎX^sݨ76꟮f[~ԣEİi'tG{5V-fսdhl 2#FvpϏEҐ]gV~FfDGws~[UZO_bUwL'hcO ^'ݣj>9BgC <~F/][7cy)[G`JY3ORl1U a yd2ڐe6#(BDңO/1D1]1x)HbMguݶ&X#)I:Sz1Yg-| ?2DvO#rENSQob`3WFZ$ i%{lޝYoV>SE. e,+ڔ@Sغ>`XdX|Él_ZqR,ȇi|ЗwQ7.Jv17TtYf8)t2Ѫy_g´kY7m 95z]\4աyo( ey0KRvs"X$psruwp=فΧt친INuOfD]؈OyC§ϱ񽋢>c]FJ +`zQ! yahmbQ ֛.|b"v`1zEsgM4n@Gԧ&Iͩ~j}s!+su[/8n!WJ;]NJ4D2(E/R,X(Ql"Jgq2jX_ȓ'˧_wE<0+ y'ƺJ;0"7%#Ћ3O_:Ѡj01WU6'@eTxc}JR_2w']%K'u5\U4)Nb$-6WfҤ31wt{.]j |qPBezP;ghE;jv˓P1\a 󲡖tl. F<NիE]3/+C48PtG\] mi i3E$C)~ Q 5V|%aaZk[֌N #s/rC~y-;EfLvڦvBx'9[{.koG =ڨڃQ^㊎ԑZSk1^%Th%ľ}F]uњ(t"ąjSJ6Io[SV  s϶tUMܝBe([7xj~oNFF g<>?+5=L \f^6fěcJ7W:DJpk c+gI;UZUmGq.+<}"/"|DK =mDHJL_6jw*hl'ދrz%v#eLR;{{umGĵ- {u[uzEE>gJ=uUmE FGkz㚉6qȩ~WښBw6Wܓw]^YT7"oaI+>jHJ)_ i)D"ݡ1c~!th)TҖZ}ZY v^XT@%ե3n2Kt{V7l,X8H$ 2O6i(~T/{x͞}`oNF(mi;Sqi'.Uǀ~#HOjquاJz"r{V]3 FKl$|87 V"eϥ%փzxqX,1KƘ*~b%^?}DxR9EeApvnPp%0#.nݐms!9 b`^|DE!9c8]ՕH1Z|vm-(nzCĀ|Xfùـ޳2/r# ZM!8߱ަڷX'B޹g^=)%+DLʈkcw|BF5HB^`ۍNt1gʑsQix*WήDT~Ð+?U׬tۺėN=-(/ܬ9 {q;&`U4T\) ŪXeI,ao4whk̹Agi}}j3PA;9F3RĨ5a FAF/ Dl ~6Frye3GX2P/.~JIFA~N/.)^lW*?Z410;$7AȔ\X=Se ó"4WGؘH] ?k,.3MF?ERz`J-}_S(N7M~W`_%Gya `~b endstream endobj 574 0 obj << /Length1 1842 /Length2 12397 /Length3 0 /Length 13564 /Filter /FlateDecode >> stream xڍPڶ-;wwwkpi!xpwwwCw-}UUWu1}5jjrU fq 3ؕM $ `cdac@r-F:2tɤL]y7;;'GW ?)Sw@  QK:8z9]@gN`e ntJ@v sB Y: zxxڻ88[3<@u ha=XA.5,]=L7vyp[orG /cſ p{[,Av@"+l񇡩Û)M2jӷnE?¼MYl!`o Qh6v/ֿnX4aȪ 96y!#x98@'ܚ*u|k n@?+!,@3O71/v O>l|s2|?UZJF_G'! af0spyoj \v}T7^zSvxc-@ ظ߾#.HO5ݟdi\@m T*-@n[+j`723sq%Ȁ< Ws(\U.?ޖ7/6{/s۷南L]ޖk;Xw<SggS/Czl+ ֳk! X%x2 ?R `UXޢb]M#%Hſ ;/_j/_hm9ۙڛY;[9` /[)t67;vߪsgo]L]eV뿆__-bƠ?=|@O9򂃹`McH]8ΔYTfFNBM]>Md^\7d2Vdf 7sI> کe$Q}V辴KI482]=}يxZ[f#vhED)gR(#%zal|Y" w9)-8B,_VW~c[B^><~'kX?n~7f׭Hp )[GH3 cėӦ߁kWdcЁp]qbŗ]luvCUh-ba0U&"&G,yXDWȒcL3rdʏQVI?gK"Ӭ^>X61DH<~/ 7`/TRZ,3`WQo#C,j`0 BC~^&z2h,>5?I'm>Bfl~EKxAHRyq45dbh銖hs ԡ m;ܳnA/ŊBC9aHҺOl0~^2UbCǸB©h2fh\yY_}!Zxc(\ѵrkQ 8:v(5uJuvodQ!OBDken.⠢q~]'z3mo 'u#4jh^v݉X8|9W*NE|d4It D >}5Ċ伔zZuQs1o88 +ˣ."V=pA]b*}V87PVuX7{~]ur+7@T^MHnkqFMwBN>K_.bOXSÉpaCpR(Vh 2f}MY/xZWb]Y2M8>CuFvh]xnDI/U:yh, vDD 6/ɨ6*ؿyWspzԯ6(4Kb6W~{ff>%^xAkȢi֟LzV,n֝#EWdAJ8XvDֵF4v|8\[ŀ.ۈYO09\ib!xE3U{Mj"I.O]56L(F_>R}:M\V_HRz `hus( |u9'e4,Pn ʹ͸B9:. 
0%qp@j5{39 >~;mA.rU>y|g\@|-7XO<&8R%ubLVQey7 /F=bs* |>/:yY/= -MD_Tp>ȓwS!.5\>;۩/I.1q%e|AgF~jҤOiorfѢ* ,`5lB7H_V˅ 5o,J])uTօOu_,fB݌H2ERaǑtLpO Qm -ݘkkpD+ج s}Ef-g&)t0yKu'I*>4g;wmx<Zb~JOlBG$*m/< JJnΗCm3WFn4P^x."8*,PfJ  7GxY$_Ր=:m6-g]1/ K g) ='z~zl9,(;tLNW5=wyXe+PŖ5#1wUܨ nJr 5]v 3P<:|L7n{X"h>'S^֨h׆DpxHd\PJJ\sv~.nd ja&WUITŖױYfc6XB8]ݏZO+;MaLȟ07mNR(ohy.Pe{ăx Aᘜ@]pV/BKsC;H'ݏM9~&9_`.v٫V>xϩۈ{ܹ՚Q*;ΠSa@L IP[x(Ph ćDfj>u0>HF!0O7 {O/)N*([B@R9>*:bO?Ԝ[$@#KNhJqks T?hvr}9|]Ή u%v-,wo'[ IےnInJ̽i٠@$0\"$ ܱL;PgWr@z@jz۞bغoݺ"2-k-5lTpa e>KoI;F;w w[i©X4`ȳ  ]c΄EWsl헗n- >С2:z j1a'D#6ondC~0!``mWdL)LWZ\F;C\g"(I143mS3Wb/\7.TVYqSrޢ6)@RnVby*kIڸa~X3=ϜB@ ^16UpWb'3K}wwL{艤AW!d)5"jt{U 2{hFTOcʨPLgho į#j4(##xp(^)EC3w2^W{ъgV+9K0 7=UW ƽha40F(+ŭ,G$jw޼P)FnvqPu"HzYӍ ^ wcv-wVuO>SG&ӟ>gsq7r0N"+6=[leL/+F5BAjqμes@.l 9~(++Eb^|W#t{պ9 wN,m\k\t Beq/FcI P2пR*F} u(*wD=-ώB́>"{Y63v㷚@h. >iO-axF'[ Ԫ_K n !Œh@ ½/ #jy|NZT'.G`jcN)NZ; uˊp1m%]l..z&p6Z Zu/((;"MT +EH$⶯c\'ʓx*NwA|De4r+b%Ht\\NmNznc;lƬ eG+nI'łb̀ݼ 3:W? - ;jj*"&=qD˛PsoĦQu#{v nJ8cb!L0qhQ!Ya%In? 0ҐЙ|BR BhF!<&a6N֌}6M2R^]ܪ;֘1&Gz^73Qf\XbF YdF۫ X~?{Ўjf .2^~nBO3S (sRbм{s$ u$NB7*b96k$T]Qפb$H➋*Ժ?}^ ;gm~fgQrQH@T%-b}(P0bU֔I_J  OYcSPdd6~\ԣxq53i}1t^&eH5Q#]2ŋf.w,ԇHtwdl΅u,=%u Φn2i}e\Dtkz)d;d4}DOnM?ΛK[Yr[k6el2!_c?'5E%]Sc(y0`oQTx5O(z}J[w|VsqHߗ)ʧHp( $lu9gkVGn҄&\[Z'W#4*׭ϧmXDZP9TBL l5-(˛ O(Ϗ[`BNpZPzNӏN^* <~mI (hФ% (˂>i޾ ~'f?ԯD`gѩ ;/UU:V{60f^#H##q >@cWA!o43L"OzRgK -sbK0YNsߖS{}nm9LoZQ%,,srJz?U<1QsRS)O5jggQib{/Z/t|oH0Te>8= vᔅGPFPSs˫3CVwpASyeE#uZ yabB()g"Fu)61:=Zw< <S G~(_<>lֿsI&@rp=G @7rͿҔ+fJH2K[BѸ:/v,W_z ,d;ʐv"1Kt%: əG`AMzw=*⡢t|q/6&b͔}ܴ GNڶQ|xw~+~ytx5-wiς>7Ub6: GW-Өlt S~"e#y?-<(0 #\JF Knm?* r}9]:zK-~R>㇇CR!x0 -JQHWlX rՄ1ABp|zVshbD ak Hi Nݾ(/8[L3'Rie8VQfЛ=:6KȼiB)[j*)gv\]P a쭋ീ^+J~&<|Zs9 3+ W u/]4{5.\Slp/ˈa XTmIs "dt$٩X%|k +2uJѭBhSae"ѕ{]b,Ƃm \n ɲQ) '9,-nUg+hxkpr[׀Pʙ]#]f4~Y]fZ֪̻Mdd)?tPU|N ǺQw:93br3MvM=V)E} @f-)hwyr>Ps̷,Nh5KǤ ρcliw7Ŋ:0uu#iHg# F3Jl%/5laHWk{|-!_,0N7G+/F,ؖs;Q֡k*IV~,^/Tܹ[! [KoݓIb@+11HqܧEv9Kb)Dcהhڇ>wsO Cf5SΖ xKx.cG]cJ#~&z(`z{@%%D ʔ{=c8\W8Y)A9A[͙7yT=Jmh>AtRcY ݆۰1/{ KvjlQ{҃Sa0M1Kܼ;~d0t =&e2+$P w)$Z F:|f5M26pPIŅo`-@AE7!6fgR#nznx`jvfaM&T>i|7|3ӶE046hHj&Ss/tzWv5\ UƲuYX}%UQXcbW?PdDfre[p!f;ȹ';r~dsݐVr;ܹgN{Xmrod>yg]/4#Tgn!_47DT#duDs]֢^zʯ «e,=p|z.vi$6*с$g>$j Cq.6=Uc%Tg]Qy}A6V4{ ,>yxԴv`x -*Rw0f° f& ߄^ZW0~Dn!AD6Q?ѶT .j{vqwuk2?;-z5X-?nh(5J+gnU.\;EW}첅34<_[J5ե\i-63f`\V'+x\d)Ւ  |q&OOR]Y0[BTtza.RY(FcS_w' &Sd NN-zn#rёGGsЦ$8ʦLyl4|}ں2E=]raߺ*}cV3P]U [ՉH&o~U?cRR({9u]n9ag_lJy]; #$g-uՊfxE [3_>0Lj=/qmQx0ҕa؜ R&T9LOOv MSS#O؄$Do>zL}I- V&xKJTd]s7 \}3A$#3Ro~Y~ޕǾ QX`[E%9/_R]MةJA|$gqH15yxV__5ʶ?`} 𖡽!eJX AJ+u'lʷ :*[wm?'%!Z7̯Ghm@X86{jGa1FuALiZ-jϦCAh~L,$O)>d9%15GUy\th}㖿*J:gtYG>Ush3΀#N1zeT"E\rǎ 0ȮK'o5| sOp+!cDY|$ᢉ#6;aI '4imDtjnx?xSTm0=b,7ЋV4H+X\^bni 4sc @9q1K#54Kc PdUr?҉#AT)&۟AS/8S9J̔ &~̼/idҗe$ʡ%aj_:bN̜msP/N N(o %P

҃\0'Lia{k z|鏦?ZSR^*>976Zm])JhRrh&eلeGsŋ0%|>IT)nk#!IxI.8ammeO41Be9-= :g\مڌ1(Lr=v-''Jn-yV'+o1Q[卋%꒟5-a>)p?1?J>yA^E\#=\"_CZ_d;Hf(QhR6@؆.OzwE4lo^ /_tx6Da#u# J~Z> 9҇Iz3ȡEoxP]ü(?Mk63CoqI| m -,c<"$v4G3;A/@؉%[4UÁvq)kW|&W#'7$x><_3^IrP{$c#ΣSj9i- ڗh̸칊 'V%,S0\8b3 {̶^uaX|o<՛r'D#ćuJy07(Z"e*qiR0w->-$JN|ViJ|v,r RgHU΢;^m,K{fh[ٵn%{Uƫ.<.\c lIqr(x.# r'851Pabi.S(=o&UFS#/˓{сq+ͥg̹tq #5>th4+ן8f{$$ x 3/qxr?Ved7ݬȘfl~g.م8SVyt58ro^0kHD^QpB]9T3ga endstream endobj 576 0 obj << /Length1 2815 /Length2 24241 /Length3 0 /Length 25822 /Filter /FlateDecode >> stream xڌPi Aw}p\@`%xpwww8}UT1wv? 5:Pэ `cdac@ְq#F\m0M2IS7#@ `ggcp 4(HN +k7p=||L9A6榎ES7k8=@tnn,,N +az&5@ y-(:Όam\6@GW wG PU(;6Vۀ Om,oG6657wrp6uqXR ,n^nLSG߆N6f`TIdjc;EnU~h!ttsEO4ݛ9:y:,m-,'a̪h,BzYlll| en۽3/%o18_g'g%8 %j܁*6n3#ҋwh77dg;'CxY89{_V%YuYqUƿ3W'.eb0sp/nTLmYYGK'lecחxj!7`f3a<7´ %$no/к@ T*-lVbVU hbfn-e6@'W ܚp_* xs򍣹@H&7@>t!(7Uo`AV*oElV`~AV `}A\V"\^ sQzA`."*;LKT^Xۼ@0Y&d_ |A`Z/LVd^( 33/&K9_8 5;Xڃgbnp s'{O|QB0Ifia]/a;:XZ!2~\6r6z0c-xqG4տX+ |ś.8 i.5 ZlEg91/GzpI^Ȁ;G E v ~;gwKqwK-q:UO\NCswh;/\\6i6@? vj\g6XݬA?⏩`@p=`#zy ?swn?׿\@iv\ Զ*B'8zfYP|}yZ2J,ca ݥ٣wM~Ɵ6fFzFrŪI5D]젿Crq}%U]U#P9ZAPuY$3 ։8V3'F$h<_IomTz$ЗXc4;rӾy =3^2Q1Xv8RmԢ;+=g[7z7+qAʸMQnv@VK+vK$9_π T]v=I7? DE<XG.$ Aieq0<^? 岱齆$>;/=Qj]2 yQz6trKh$-.;MXZU*`DH1J)d7Bi/UttYA-W#Rͦ_U7vjCº,v֮JJčpwnOJ=#cV5xC߷f*'KEA_Ov"Qj{;Ǣkkݭ?x2JR*v 5${YQբ|CC5Mi_uD$J]&gs::}AWW@C2'e!O Kb5sĕ=僾AqJN6@nmڒB#T|OYlPAΥ`XA.$ղHOB//lUޒW)6S3ȧǐ8 +3R0/ғ F0BX髊Mq!ʋCrvl'[@cKNŝˀ2GYtlq#)8|Zv4D >oඹ%ja(#'MxEyM!Pmc샵]͔2 ;.Mm~MChdx)AnÂ5X;}xVtj7Zb [xGb_|ڐ(gt`( @Uql0>2{z+礈l=5D{Cq'$Ҩ- AT}?5Bs7)e9t>÷FRooK(:ˀ03ZZam հZ{[TYvZ`n` u"0{/TQ5T/=Bn?✟B@oN?~0a%B^O}4$YV1!5+a'e(*Ud5s ],D *UnGC*SfQXaj~d43<*S@hEBjN^F#˧V{:{9_ы ;œ\5<@aEL#ϧvү۹S >1' ȼ%(Ei^>*Y'xW<ސck7Urc~zǿ]eC]{9]C;; =7BӖ$x"MaAhA!GL&o2ɛL?ڢ{iq:>W3ό#% 5`"r7E29iF#A!4"x\,Lҵ׫'tb(cG& Yg)΅N׌Q#95/gyA}q Ɯ#f9IZX=9|򾬝K0 H*p8kGM' |5]{Oؠ+Q*~Vl!Jy]bND|dָ׶x ;:8J+yO:!J1#{ֻC]kcQ%ޘiXwQK9ta*B1|7wd4}˯9us=(eJb 'V1vbXρ2M LCڝ RX&T+#?&tb(^;1R\YU4 W֨Vj-G`TK:>^vc+gQgtߩ] KAj98p2.6.󁶪VROUt]vvōsbZ~!=oMCc:J%.ļiGF)r2cB62](Rvh RiT]R^ޡBWCy\lݾXk [9\_bĞ\p:z^8☉~BF 7G`~WwpLņbfły|x"FA V=6Ԛ;w,]uý>ޅ!dU>e?e_pxL4#A7_Nԁ4Jp2)q,9< aiX&Tr魽xDtr۝H(wGEoM#-oTe²pjIp0t)8/gnVe (Kn͹n9xk]Y".pU܄^Kv7QD9xFzv\iF)t6ΚMV,(Y:9ǎi*E}67l{6Ѿ$6q`_Ɣw*8mA7fķ},ħ3%D DžVu71%XfTĨ$X%<#y+ڀ XhҊBywK'<O HŸw: '*y;N^~[w-NHb FVtǫ: OFyUvO202?rprW`p%!|r$5.FU31Y"834t د- Y2I,]>^kG0F>pqK>)0Y8I(Oc>UH|vPU:5|mP_5\Xy38@m PTѕ. 僉&6)ׅ [\#BI$R/)c.c쵐!<7:U^=bT6g%@XN)=RqHo9!jR[/.k!.C( wpx.LlUY}nZ*Fݼ:ΈaN61lDi*?rb@~:tQ"f6Um}nWmr4P/Lq3BP m]λ᳷<Ѫ (fi%EC I*OEK SL2$5 ߵw҆Q;APΛL:ΛpCy4g=]$߬yȾWy9Mcag#}e;hu; Tw$KsNFvEW7mhLnY[62gYa]te@])'Q8!a2zԱ3m>F+ԁJk uBe:+"8rq7x㜡$62 ,GBXS/=h{Xh;$gxrPt/$KvkʥݹSojgX?L ?D pd/u0wcܲ,⳦"+4=V51]ZJ.׳w fCr,Vi)i-cqe.cD?6z΂^-z/憉\ ~Ѵ8zmOA}O>0KCTjӪ?N9D +KoWoYf[ nb6Z?k6O>kؚ#=t[owv8; l{ i~# E wj!b&hkؽI6V~j~p$ɯ7_;yk6G4_r{"󨄟XQ ۯɜhC˛Lj ;Ls4z 4_lky8O& UgQ Hk)˰;\0qf{a}-4S+vM!-MQAj(Y132N"[7&).Rֱ,3PFc̤aLzJ opG% /ʯu`>W'Ϳ ftWMbSڣC53huj{߅JkNa"%n/\ ?kC O1ȲP"sjoHYB],15zf^ڝ'}ىv-ekx^伴fW'TqHҼ~6zg>~a_&kԱ#-' :8cNԑ9?MW[?ȥ,T㑖>ܢ ڲm"qʞ<: jF>~LCZ$ٮO <7/bxERxԴ ~^ ]A9ՕVmۼadVݍ7OW@ f7Cz9S'O0/$ f!+ʄ5"c0`>&E0$L85T&V,M҅׵p_=k1̨ o;~9v?sT?Oӆ)}2 O9UolA[TK9k9JasgGuQ7<R1^? 
z_hVpVSR/Vv}V)fY-%%ߞN3"*ߖG.[v[ kbRglnGo ˈS!H")&KQJ%Ռvatsk-9 )Q/é긶44f'AXם atܭq[yOEYJZV<%m|&$CX8vrp }|.UƺmC.?Hz=>ҫfliH8.ٖJkzծĤ yMһUT>ǑGz͂@~16hhcQ\5Zx%{_1qLW9^k @Cz&`|2d~_ w3/ip$>b $r;ZC-uO~YɗaN%.=wʐES WA= DžbYsݖ%Jc4/ q/fSF>MLu3j wh=:¤e_HXȭ+2.MwIt #n)_%:ot rP]A׿h E}¤9藬3괼=9=L p#2cj=]O^uOS!Z" :Ӗ=PVޑ.Z8`VUwBhܛ.8YТ?Y􈄖!>xCΡ?y.Cg~&p>Ĥ +GVi;oPqG3 EC,sSf]=I1`**e32x?g𱡵#GB)V'ί/8̌yFIv3| A#_u!ͽ۠/ldUy3-wfפ/5oIpT\ZչGkH?*e_0SV8T=> QDx߇=*l܃Тiٿ&<_~Z"VR(nmanqQtgHu@xr~SIf<JW|jiD >V>wx5{Cxfܷ\cI`\/T)4P hO4|ECF31m~NMf=h1aF}o %G4QR QlBN4C)P5 !89O5$q1ynWk9yzi6AmXFEWʫ!ٰԸ_Uxhd}&cbaX#ݐǖ`5>%B ˌud>EZ>8ȤKnKsl.=7;HfVRL$=2P=OqD|(PsNpsIvK^48e8D q9ru;"?愯ˇcXgz#|P' {'tд3>AG;CaVxgKS7( vkchj//,~#WV귇嬡V;{Đ7HfC;pB$>aF%e6Rx& C_>{"u]@Bn3j7^ duP*gB-秧p-]vH Zfq7ajfЏ-jѵQ}| gl%$2=I%N]SAlH}g2R{DX)ly?)`cȎ(nH`yn؅" n?fL8u6O Y^-dBMcK&ZqZ@|ZN,ћ=I(ot 0 8E–_g8rFMҞZ%[1||o׉sï]9PH)lTEY=\fUyw!{FU@s6 cɶ~-ۨV JmS3[9Y۪:$_huNw$~<\U] "'v Ziw.|ʎW2PT Gwd払^]Xôiٞ(ry֯T2zǜCƁpljTs~*N(bV Ĕ E8JgWSghzt.dtHp}8T !SV%sօ)}_뉝_@S:DGnC5D.,-5S4#.uE,+LI3W:S o^-WCv&XXs'ۛ IB"%9*q3\Vj]dU.CC9HU뵯c +1"7IoC\h-+bM.%#ʙ5k--wP}Fad|"lqOBeB4!\u}Q6ә2 SzCRkqR(Lz$Lk=UD\+SbydfӅLRb@xdE̱%se:ыQŻ#-숞SkǕE ^a' צ/oҧ TpusUٌG |\ZT#7:/=-x3-1?YQŹAbF,]T^uрR37Tdˤ'M*gL8fS`C/FV.m2[}}FSzۥu .s4E)[3d]2{FySр$CU }؏Ç;M+c"r Z \06 U E#% oF~xKܜ.|Y\Yܟ,=-̓yBMZ:-)iB64b&GƓgּV>{3W^d2Ns57LϢIky&!SZ=HqTID"fP4 zgFI"Og'pEh朓ӑ_O?BL_R% 5:\vx)s)ex[O2ukUJ.r5J=\ DB`r"Y]vd #"8%Kh6rU>V gs`D|ݒT&oblԇ:y]ѰǻV9xnZT`w`!R/breGh[7e%:<XP g)jCG YkrʞGZ}$㡨?b9."=X$f&}5ϽG^lV ,d'sڌ;]TXE }twB[o vRܰ@sU2KkG%Qm2جVPFYb?j Q; ȑ*(dtTG3|)ߤ%p"~%M<2f">xanTjO*ݒ]C'JFGxHxQZKw1PV?>o[B\X4R{ , =\P& cI=jJ\hz |tV'KI\Tcl5Mmԟn} (1e{åcbmupXIVCE'9|k >Jy0B&iW`SΘF[miЂn5'-SYAgzxFbW󶤭jW[b`"QTߦFЩmm,mbc29J5?9ҕsR\eSmcb)FZ7rtөJîcQUìo_>D BӢo-@D_A>/p^yqG>RwwvwW~sg4&8~\:扟ϊz.QTv Nvjíp0qls `inbSw*b Ƭ/.!1z*,xs^|AWBvDžحVǿұYt.IMRqDr$Mk\"~4JfU/jwuh?o{C*+kF*V:ʼ][ |5՟c蜰(AVqSS(lV(!qgԁHMƐ+m+ئ"~d$kF o=ZUfZ [L%UÇNT\W f;’TNZqp)]2vOAK7p8̦)v!N؉|ـVy;OņYrG<pFQG+ - }W3{@DU>hǦ6fkRQtꮨƙD!i%&>6yZ6 +6Ѹs'l h5 >uX ]rW{9L: T% ]Rhdv  XN+*0ɼQVY1?"ïh72iXkrYM0ShW0|MOupGe ?a vxn]sYj᱋u֟*Z@7jm&iWu^3 t8i评EjtQ Slf8->h@gcSA$j/=X[`&NOVly(UlW$;.D4 ["p<LN4c{6 TCtѩaX1)1'^`ڸ[[!=~Hwp#aE~P_.?p(gzC1M(g&ߐ YWpT&ku)ceDL4?DRM9:j[L1rv*<РtvlAd4ZytRMȽLKOvc p ;zl6{=e36c7A-?&mYfD4o"EXn9 {/@ksn27IL=C`N:WZj.eB(seR1`Zu4hȺ`Llީ1Dc9:֑S7Qm Tg;ANzsO}CC"|tռ2W+CkܨO>)_nLjW(B+dma+PMBYt{_]"i~wmxF7&? JPFtЭV/J(:9mRSI^WFزb*ƣҰ{u\JO:Sk\n7$X=юs1)Ġ~7rZрΡk-]LOg)QZ8ZbSMLQ=)[;UJ@-ڪ&y(/.ư׏!'jGx`azOS{?G0XO_ϸLi_Ardc{d {nn̬C񥔜8u71;pcC2Y U(SK#7e{p2xb@fB|s9ji,tŨDO ~}vyi>o|Ӈ'w_ *rayJꆸBO*oWGNDztGr_kRED4Lq+ Ke2hO"Cj:Xi~9.@mD-[:K9\.3R8F#}k gq7:˲~ot|yvDM_nl7JHέ!416vl@K%r;fSMn͝l'=(֓ *«iERIƻ죥ZIL4XWƍN0>+L 2RgUkoViᲫIL۫^v׎e.'T@iwO NA~}ƫ74Vu&E'DNi[wR?E$z P"lzϡ5"! bM_%  pE?LNޓZ] !3L]'B/KC8o'Q %QG-u_ʬħÏ9,7JxBM\ *9`JPE7=nח9=(mµ*2ۆRK!pHd58:"FY g<;=_ޯѐ}Tctv3J3u[bbZ>ȍ/ɦɾ} ‚OGgiy&s0~s1xc.Vۡ2*_;S&sk`tzPD1U2ߧB?)$ ,Z> " /fyҗ=w۳#wM@rVoOhHse$tr*q|w,jbDV)ޒ6< t֢[ڷ~01Gkݼ,>G /~'[TA[=G& Ip\jj$K{K.Y`P uҌpH:EIj([& rp/fWVH&1ujziGBn읈Lu7Ԣi٦%QBiŻsg15 vs43u[*1){~5y)n9IcDj2 ~Jǯ'=MuJuy #jd-*an.ӷBTG&~CX!p v3QHϮ!XZ^~\ExM[]vP<+=]n/}%P't!<uѹgf)s9>]̭AXu*oyOCc`WNtȍ$6KGt v JVWșrzo|Q=ć޾5Tk[m AwRL]8A˻%[ݵcm搘"T 5ҀFz!&t6;n.)=4 ZpDZkIU=JQm8*!Njd#w=~|Bު#w;α]!9]?o"ɬ6`ѧ >9#wr[ޑVSqI_HhXZ/[ދq{ǂʷZyFfm4Uh>*֣ERPnw.Մ07|nw(S|bZ^S((5ϋ|_Pp}@ptx9+ >|5@DL0eo,%Jͻ0 R˲.i1+ :̘^>b8FA! 
Zi7!p6]$#ưJ=:IMEN0 ׶>A&#\);׏ ]NGJJ}DtyKaUivy1T׫_ Ȧ6pCy"tFgoSn:ٷWi*uq Զ`XgiC AQ~|(om +Ix8\HV:}8A5R"G~|^`r>`_ټ  `x6ʩ@r/#I@e~30};ʚOCfVk"Iroԝ=nG)-/ӦX8%.X"HBe0k|wwxC 3^G)eoZ؝k4\^MѾXcb|?vz Ȝ ݳa-O2d XY*UQ&7] xD#خD (2C+}(,8( cKъ՚22+pg]kH6ǀ˙I5t}YN2|-Z9//ZWDcZ0$ ̆`+fo#v`L:P؛l"\擎{÷z3P˾IC2oZ@9HEɹ`і EEZ -sOM}eIp[J 5 >%W$Lo/YO!l@īᰯwx^M_=K5ل$$ EKDK3Rseng`<5oFWwiGvXuQEG B ?= g`rno+4:CC)՘EaN|#ގY iF@*c"G2կNnf 0 5 dڧٛh? G] \D663>MA=y<fJ \ q(%jhd,z#XT1d(*P>h{Z H+ps^(Z/mB.(^ȧ* ІF*]8bbFUHdT% w93W^`Cu%2Z;{V5'r:]II A|3u,1H*gph!,R}j56"ƨnRQGg4M]=€]]2j.Pa<Ӫj':;_"?( Z;>VdzVˡSfCkUhGv9gFhw7v!P8&w$%8㵒*iRմ٬09 ߰H`[#|,bSAJ8h-浆+_g'VD??+ ;aYh#wXoVƍW{Lgگ(8dMHަd{A3pZqƐ}<޼~@+5.<^%g"0n@9p?X GġsKe(d:Pk\QAr2IҒ 55Y n}T &w~,AcOl)$7i~]c>05WejDۛOほK}__K ur?&\'lH] m=l`FD6` :55_R鲔1XhkavJ u "n%@ӔU*kݭ*(|cB %6?Z+Ԉ`XKZ8gz,aO0/B/^V6-v,0p: ;zۗ5L39ta }vvuC u- SY/-T 6ݨ,@[EZN_@#Q㤘H-|裪v MQE܌:\dt%dyef\-en\ܼ3+<~0LӅݏ@ꌋЦ툖c{wʼ7< :؀n:ek*%w p Vt0pqh>8xVH2[yKӛV9y{_TUV]Go4cI!ش!+l ]7p~x0q45а/;v8m([i,gCܽ;7L'VX_r%{=7+al!:?%ڳ_Gr`ԡe^pZ̑ A{:1z;u.j|%`q"`Ž7lDr| Ołژ0j%j^n 3F1}+1Ȉ@7™ . fÊja]Ao<)#1qOYHBP"S!G%O N# ;!S}Sp] ute^ y+/UΞYT?<)Br@X^]e TT2/S ڧ>VCJfsbɨS,oMvT3ڛQ<=dqZx*q3F7~.;/Vhlfal_%v4e Bܼ"(QwP죘 c85s!ɷwmdPsm V3 K3t wHma&^ Fo&z!mŸyV%u7֡8 gDܸbqodWliz|^xW<(H-7e&,y%2OFqUAHC9Bmk\eP-pÃZ*X8KNjb\Iu8j3K'^X!J-Us`]Y f PZme?)g$߇B%%sU=5]'F9ЋF@ZQIGGLF.N$U:d`"y +sኋWlA.h>!!cyDEnH y|P._뉶ǰ g|lYٮ^fԷ#z !:Iy0b7r*^XV^+`D]+]P£!Q%S4u dzNT^wK] :NUBתOS`SLFuq9Tʒ% k&r>k{&Pt2yP`Q3bý1ze Kn+P~!EPw:VKzͱ !T0FjZ:nW#$Uz䴶7*_~s^X|mp`.Yc4@3.wIzo-yHZ1m#Oe()RǷvZc`[vVmAVtfDaoټ6p֢ac|z5}&P@}NO)ȗve:bS8EGȕe=g^0,R9`ÖŰ;樯SѰ@lFn|X/5D }q;jQ6c]-Úvmqu(LV 5H"JC{!~q PbJ}`0w(nR2R V`CAOI9w+8}׬k5JgkeU,YSߣh'7_>SD$ZT!nt/" ַOFXx8Eu9ycꅋk8战\CU{bL#P*+Ÿ:Օ-Z{At`Œa ֐1D^?l+4Ϥ"zٗbAHhr}6SԊfX-aP%6'伜Dgu9qke8lJHAS%o#=X'>HQn-@|?(gi"ϫZԾYƮ`|LwU~ Nqgl(:{s`aA}<ԧя}<$*~Se~vS-#ł\ ?AٮW˾ft#J'ESR5OO^&ņY05zeji5$E2՚ .3 w͞uxsoN*evu_򼭀w9&xI FmTJ4pf87: -`ԙ лiV]{M+X?^J?3E3j/0<ws?z6ծ!}̂A no,ſ/'p!61#a-éACg[|J$=[}n@;/lq:U#3d/:|Φ5SؼRҷ^kӚhy"pD&iGꪽ ϲ()(%o@XDQA6WN9La Zmթ)6v`\ ǜf0OXBrh@5%CB 6<3W+|oM0!4 GpQxrYw61XҢx+9v )"y` }\5¬zM-KX?ۚhw{224ob~Ѯu=Vɝ ]aVl>;+dcÓ:tx>EH&ˌ"i.d$)'J3:wiFl+Wbz{o;c151^Moh  ̮E@Y)U9]ǮfbB_K]x:XIv,Jy:TNT2ɄØhKN\ilA@ء%5> EW-}pw]{Vi0uOM8w9=܈mbT3zEYե[%qOym4q&hľd~ 2>:O~$?^'pu0$gEW͌3^$5׻D=K ST(262ṈU:SqUG$U$~SqEU 炕[i]|w0`/kN!$=hȊ#sXऺؕmI$LIC V@ βс!S a^t'o17&M8e[~̸9x=^1:6t`@ڈO J;;yIQ uЊh~j) }8EaY;62n֣ %2خx?.o]6kt[Nv(8&h({BZhgg~sECϺ.NE먏YLXuI*=Lkl)XIC[w Z ~Ct8!="Hpd1PJ*x0[v;`^bnfFdq.2'J&<#EEdYG ^$ ԕ!~Ԁ5L ^$xT!~)ƘP*;]Bgݱg!疃kp+PDXLh[щ$g~&Ў`:d*D#Ǻp:sX&ULZ/Ii1,L'a_wdc%fh~+ OTnaT$@ 38*h"Maf\ho|CmݱyLBӊv(BUkXnW1!7/>ϬM)fbyK#@|mBg(hjoR}7/\OjB2HfTJeʗ՛D.9Ev23նEJ'|6zǨ~4/O!fWuh Rߡ=nrNdJEcIr-cޝB?۸DG%ɢ}pQOUQ?/ѴA_Shcoݤ8fov׿I9o@P,*3u1ezUԿMZ@[帪/j&bJ&Td:FP&T";[*ET64> Yt'@>:CK]AY@j1\L2=lP*%@u}03mk + m5mTOR^ՙ309Կ8DZIއނCU^} G B C/Xr̹Rm|Ic$mw(o])}RV#W (rQ3^M 0F_L|ŢYIQw(쓼?. FTmtoj*cu{raD:YOUDICdYx'@aկh'- n%1*TU,_0$|$U)0Y&8 d qo2UΟ+laY"Kb?#w%BV1YM\{뻅h7ԒO 'Ǜ:voXToZ,̧١("-(k-(8k%鉾Yp^;K%J()2kSkd=@4հ)mij5][v:C̭ÏmAF>,Z$Rѐ/tgp@NۥkBub5raI]|m$Cwj m׬7h!hhmҦפ(:A .ğ?m6l*3@m7mlQ-0S×Ƣ oN+jۃkp_M1TNgB13猸b0G drzrLBv=yԨ",4%@ t/ j\8tpY~&ݖiINՓRKR{=g|UR hTe|`4;rI"66A'6Y cyȿ ?͗Z"x Ax]jWNr,sD EK֢u9f0HWw7$`It jUnbu=wU%ݪaO'u# J1"S &\\!Rr.I[_~;R/9ύS^[]ϵ՝⭈wYx$#'HAގ'-9Z]xƘ&}gztkjb!,>I'VO`cM7a<΍ ^6}.B``䵞xEB[(bX+!8-C1.9հ)s8SwweHw9)Ttxv` endstream endobj 578 0 obj << /Length1 1373 /Length2 6101 /Length3 0 /Length 7047 /Filter /FlateDecode >> stream xڍvT.!RCҍtH# 3 tH4ҍt !%J+]sֽܻk֚ywyװ3)!l*8O( x/@!BvvC(솄" BuJ Mh;b@ @#M8IuUupr%$x``7- `-`QH% A\$<==A0$?A EA`$l=. 
3?!;E6@أ2MPWUsm_ _멬XtU4[DU ;7NwQbE |)Z+/{0 ן@Oܽ0yݣ4FBA伛8磣QCQ%0u_ "zY<lu&gG:pk5Q?:FQQanTxu+Jb⤑DIFtewhay- kHRCN9?x;9ڏ(g ~%~ׂ+H{.evb?( :zyLWl]@:csUY ?]r o/pp 4O6Ȳ/V|g97"{mF^}}9!D S:X76ODI3FSY)g)UIL<ߙ$ZWSw8˼oTУ?=~7dp|zv6U_o\Kg쮭9"/!xxZ2%:R 4VME=Smi-Kdc`0C̑R5|JONdr}s/)߀4cFqLMB `roҡ[ T k5!wFNxVfy8ZUIpN5b[%|W54 C:λ O\%Fમ0b}'޹]c;+[?=)yjio[/n!]7n=b;I ,wiYޘvzDajrW19Òi=v>P>D{y;z;SY 9.X=zܢ2 _h) ˸H=a$>N3+a e#QX1w_4XZƹFjD?{tyRvnk#Am#+bcu'^gM(iTUHipT* 7^E@]rSrݵ7CYe*0nK;%d?]yS2G彚'4Y>ء2!QGbɼ .HDi쯡>e8K=)sXW2\-70bԾuWMҲY 1OEȊ̘P b i7,[in2Il3(=vaP@`Rܕ4VUz{Ma_V<[IBx]e#h:@f̞y6VI%ݡپ5\:qB>^ބSh<:Me*/hH&75uGd#v|T(lŋIQbiLQrLڟ<՗Գ:{Qx9yn }_=A'i~sHX=#yUľ / Ԧ7ꫝ~E%9,ܻA Ӊ޿`X#I/e#qF\_:y]X)Q$9I|jX/J}0+?3(9k0 "~'+e2-O~cSS4)ג,Md'V ?,*F->W٢~Qt;*0te W.p֟.\V *h<XDEF\PʏrsTZkq#n)޲fI ǻzм3 4e5߁i mm| .UAzƖ{2r>)D{S5Z8&h"G̉էBd3|lIϞO-Ѽ['R ?5AX&4MZ<5tpʺlD4ʂލoq2V?̐.joXZ5mدN(8eu~)C/p BtvsPpEKbf>fb0DU7g ?e1BDywa˟l_ kĦUM+Ip_D!%\PqVOqT{to]S{sQ^,0x=Vezsw= E CMr :a5d8Ě;luΜpRoN]qKjrגt|R%Cul8cڹ~m8i"dQݧRG2xM٤nfx~_ltw{G}t=9\S8m.V597n?59w rvfN̠,w+]][̫*(G cwiM =2۾L\ʢk]:ɋ  InZx~iG rʔd˵?edPjPNWyL1C65q?RY噵"K!"jLd ,6TیPȲ4:Vd?50>dN CXzZD!{횣a䷧|jپf]q1]јE!ZKxLef(Dc's X-|#e f%-4273fka>i|Κ{¼%k(J8Z[#$:g} AK}UKNSKS^UTUc'q.fH~Řcؚ-rS ^RmI5ޭ 0F)~mLW!=8Uom>r+ZI2'i<̅ܙf&iVZHd^.l┼~6Vk})s.$pz/%y[#KIQ6JTo bb| endstream endobj 580 0 obj << /Length1 1511 /Length2 7479 /Length3 0 /Length 8495 /Filter /FlateDecode >> stream xڍT6LH(0H %% 030 )-]4HHt"4ߨsoZ3~v;5@z-]Nik%DCrpD: %HGhAB0" `$ #Qfp@  Qj PaW, Cnb v V`@8n;tVP_!XHgQnn.+a+":Wb U.@]gum`V+ f APwtc?Z;_޿AaVVp'g0 @!M5.'Y2;Q`w0l28 :W+ uU!0&ìeNNҕW~rP u/cu=`>6PͯݜaP7_(@ !a\[ F wؠJAm W;DA|[o` B,!P?Q0揌< 0}2CqspTuJF 8y@>(Z`_YU"Eu? 5}Xpc!n Yxiݿ_(9:ֲR`'_z_ݐ(Q_SCȟuUXCݜWv@fw POie*p_ A_ jP+UJyv ؋5b$A57\08@#~$p@Q$Ep7?o_ /u7+fyH;|@(OLa7WVnjSՏȿ_bE0? m;~Qbn3hw$Ma ^EJ ["rdgyk۟W :o (F ?{{kقީuq&'=U_!|f]{ZPl3V?4d g=MɀcI2*f}^">MS|rJqd^9G|1pTL9:fU)ѩ>?`IeSP+f"VQb~ÒųՍ񆏜[D|%H1Eu1CwE)?iJi]v\s?4F0!sONbm3j7 X}c8I'ݟz|=ب GVfQf%&lO ܽz%mjf5 GW >K?Kz+Æт2nuCQBw&T("x;l+e_AANZb!ORY_'FϧIK\h0Ns$+:xj|-Z/>eTBf۔ R]gzO.\Wsuhxi^)W(w#k= ̻L P4&J4=2*oTbVIn H-`aEZ6t7CRZx:=7$SL7hf_t>7?C4?+ Uy&7b1'TAw2lDWj9HzuJ*}r0"!8xzhPG[8Xw5Ғ» X|elz/_z'kkg˭f:^r{vjӬ[ނHo[457SoPeOa7yW#>6ǐi;A: ,ew?B^og \*EE{|}{3[VM[-@Og"9$x;jKXȔ8b$'Žcq6$c05!p7TzTވ|[_#=v-/H)@ݗRe{k؏` l%KXꨕq֎&UvId^ORGr,߇0&E]!Ta:υ>\[(-we5 λ2?&_@?$$_-ULNNLǓ&0l҃6oHqJi  -k2 %h+n.evkHjZ)`mz1;>mGco`K-@ [+KLuO63]4.=Yk=5'mh,y38\}C)sj+ifʯWMup6܉r\q]pFn|o1} VVe@Gg+ YmAaͷi߈l3ܙVkXv~J`P|(m~2bLMAjg o@JAPj:qtZڝT~#ծ*1VōS^Z!$$ܽ|_RhGccSpwl"a3UP löpPg)$OX]riS8k~欵}8.-ʅ?={gmpH=@pgΑ!3ځ4z{4II9!@ˋܣd{IGOTEs` FzS:5Aw mYLc;X|[2|3^( SNkqpy{l6~͜#(tK+H`@ut{5\W%?XQBE;WOWf>46z7uxZ Ao}W~oZУH*>PwH8v|#cМ -&3%_:2Ώ,Ed^sP* t,%F>ۃqn%KamT ƿLrǥVkQ\v~:II-1Il$2eb\b3V_f"cN>jK.6A.gO&fʳJ/ѩ1EfS.3lm@2ۑl>F%|6#V:ľ~%T:5NPaVO^m n>vA`+ $m|Nx2RQW-L4K*~+I6A/DsG=X!|'cۮMY~x80C\*1Yb3:0_$FM'܊[eRpȺazH)#or )=!QZ&hܱuyMUk M9DCaJlk3`u{M+?%Yal_Yr9ldެ 2/~bW@)z 8F>oяKhN;nL(s-ZeQO*/Eo[A5~Ǯ 77%DyK6>PԷ\O+~;!/t/17l%"lSF Z,J ;o/PhniV'$S.k гQ?}D}CQgqEWWE҄@Sbh`A}]vCZ]j-'>-'$ɺc%z;y^'tęG.=8m (=ݯP#cYg>蓓%.o:#-du女fI]'hf)rtD;׽+׏Y3Nq*_z>5`M,bt[]5˙~t{BĂV;|!h?pq\F8UIMl-Wt=]:ks_9tM̲Sslͼ$>g&Ubz_+C}1:NU~/GD˾,:و,/ A#"-I.a\ *gUSPZOphe#k%VR=噆}Q\674bvvKByŪe:.aϤK\eq2'84€MIUUtXװ{`22>}B47'D|/} "bq 2zlnH{plptM ՘㦞I*y L*sY~QC@E 9/5B{u"[;!7~!O'BwTx׊.i`zqyR͡oāԃČqDɒ+<[ߺ#ͿTTZ7#))1mm:y"^o@YcH8T+xfͭ0άpl >p6/] F_zFVȐFg,Q1P5ť][rW^K,El]?%;+WDܥjlyӣӛYm#X6o:)Î_rbGBYGK:L*iʵuD-Z3 W%Xr|V^=pM,Rcl$ h@qg& %&,_7Xj IїtulFbSw›U:Jy10f^w7M#;N)y&\'p132jT"}.Q'UF7xn)SM%b+t(ĊZ3Ah˭1ǭڳդv?sMi uЅX=֏ A[sx |f9)ERo#Оt ZUM-5X>κ D%m,Fedz4Q/nYQ HDOӎn?# l:݅;t }V)l hBp,$>xYS 
YWCz-HHz4j0]'ϟ;-]Bd+bFiٸYb'#sI:f^ڏw)Lm}YIk$Yb>s+Hݕn6t˞q*/tYN7W+$w7!4M*AaW2_2*_@˾^8:5B0rđv K'~)W*,W K# "3啕ؤ%S9C/,+qmkv,]dpʑJ;d:(ntV3:)+k\aS_sEy@zwnN|m0QKp 犂9ka S }sYgϊT=[1rєsU 9Ơzw1+JD·au$@]^g mD8|*2&m3Aj{ў\a֜\IKֲZ#maJk;UϼT^d7RYZi!|e => WV yW\ol=}Sabzщ;i/8z $o;j WyPsN%Y~;*؜LlO(&رyEϯD|2ajc̛7DY%YT`#@nLc;W4²[%?V{mߩ/y3pn7z:xtg}ph&}`POyP >ˈ/yR'ʶz~FsP--5i}uSsG ǷW Vo3 >`E!JS WY} B?L)O '2M`պOiBT=1uEOG> _%J0r=?DfWe2 =\}A:Y/gQOFb_/unJ-Mdyk\\пstJ>"~F|jOHLQ7q׺anu؁ZDV ^ TU $Þg_px`Evn5g/b~5 EZ! m$;2^ؖ[O rЇ8~)qsf+hdQnCԀ'cB?U:İ[ ."AsB3߁ rTSw>Ȳ?lI7x yD.H[m2]9wfo ۻ[4nG&9O _P1t~`-J#6?ra endstream endobj 582 0 obj << /Length1 1651 /Length2 8628 /Length3 0 /Length 9714 /Filter /FlateDecode >> stream xڍT6LI CJ R 0 1t %JJw HKt|9soZ3~]Ŧg(( (AXm E`apH^0( )AQh5m$!bqI0( KG% TB@ $ RDzxQ(9yy q?́0/=ԆaP7!C3 !  Aݽ^N2@?8hy@a&9ý (? 07c յ0ğZ* D򷻿;#0#=8 wuUP( [DC}p7Zġ@y} ]_y{=PBp~A_2ACS{ѷ"ϳ#1SWK `(( a@3s$7?$tD ;? o/ 7o (  ѝу>Ygp Gt (oJA  0P}=(,ت#@?E<-/߾t艅ypK(?Lˀo>*>nn< uţ}m$zj s]apeQP#D G;9*ƿ !  UG=P0;2{EŀP//hb$ AB$ mDtDz~wLo/.AD7:"@#뿜UŁ {@etJ s'=_׿O4= {y|Fo21 _@!q y }Xto#`0{)'.UO+ׇǹM_ }j$K-u&I6s*7z/1Y*&`l 0=Ji|u3FgcW6 O$zy~ݪ=%Sb%_,NpڽcE 2QNSdܲj$BvEY, ? o$NIoAnj}J18A5BOA\;j T<~l6ۆ6jtLۍGeXGdAS%Umo?fPńip1HKMa=z6*3êzBV "f'V!rga˅?bgwd4D)ez 4Kk2X[7ii1G:cmybr $oȆe ꏶgaOFȞ OF'f%M>clgf. wJۈM݋P:I0 &LM}dnlMOd\Ve >o'_>oǫtA[Uztb5`pͶ=CԘEAYl{}^Q4nc 6~%ig >; t.~eA-ӎg6p1^o?Lߝ]7ʸ-T\zQ-M3jNk3W|zz~WTZ˴gV/l1-^夌bgIێND_v2ti쓱{M tJ՞0ώN7LՂ9E{@ӹ4y|"岗 ~˛9z5VQpAȥKֽ SP>Ǎ0Ohns}̉đ(axp#^ xtȜu^ܔbR&֚nجÁ& ?޲+OJvѳ]ݪBe6Қ;,Nh]=EUi\%DobK1oL8vQR}ǯ]R#.hizU*ДT>2w׹G躸CG&־ +n˞XTƈna}\8B0,"He.A6ug%JBi1ut-1V{.7MM&.@mǧA{{/gLv}hƛ7ÚQiJ-i-ZYEUP1x#3ɔz]";r3BB t7Bq{GÞ%D3{sJ;V(&yZ][>%YU$OTu v,^ӧˣ GwFg+JL)pMsFDr<3MZ7k=41:H*@|n h0H8S2:EqzTU{I?tB҈ډG'3"h7.۫EҬKQI 31K,AgզWE7[Y1;U.+}SAC)R7KRF[Hu fa/ȏcd>?Y(㽁w{\a.WtW$zYEV|Kr0fxrTnt…~~sA4yɦ_ ꧧr0I7{51 jM51s[5 Nr 94=$ڹW``,;j8cfj4+>i`L .a<[L:AXϗud!r.-b0m(h{~3|$qZ*em%~릋cvxQ(|l[4xg&1#tp%‹99Nla;jVS֓*CWֻ VŅ9"?ES*4VfME~$&3$:kg,V-"g)3jMgвo3Re;F Q*iS̋tÞTMΆջ2 n |5( pCkUt + 0:TQ4B8Ӵ6׺檶u߫"(i*%Q˓HS-ݏ;>iW+&HReHc$Kd t>`[Kx[|Ryd=!,!1ϤY|?9OM_El=Ih#&9۱eԶW_Z{Ko߳oHB*-ÏgǨhA$śó[5]Z%ObfWe*f_v pP֘.l~j7JI ~I Ǟǿlx{1Sv:2RgO \^ǰ'k p(10uS_6TnTj]ɫ6tvIkcpTmofϐXfu"~](yM ZE_ @5 } n@J'+7tFiV^W@Mz뚂(}\h" <7(b 'Q{rզ?]cՖRU9w4uN$h?&fW="Ҝ˻j:CJ2`jq(ǞI,j9UA[HV[́VYf^lQթwoZ%zwi̢FͧVCEM:~u7@#-G)?XMen9mByߨ@@n~7N=RS A%qop$'N)Y$ ~ 9]{J! 
cɑ,TI6˽vQV̆tC/*yj$1Ŭv+bM/Q$rFT$mrqtOǔߙhwO`st}baQjoCkB[ nܷÇ{K"q"vB3v?/ ѩ:-1+ XV"Fapr{[<ت=s9upy㣼l-kO/fF]}}cS;NMCc[(18AI/QMH|7 f"6$A8Zsu1.GQv zD D҆+U7&](Q1 @: e25Q_2{@VQЮ.6(.l\B_k3TOVU4^HϤ* = Fm7=.ɓ~g_p VeQ i!DUqr]p)8 [ #?E?mțYʽĭHʢGo'Iʘ'lw:fj\*gհQ~{tѸgߞzQ^qe\Um`<((=̥gtpVesjFz|Ƒ㉣ TTIXa)&(%*ci/N"N plcM .&aΰ4Zk|t/vЪ|/-87r5bFƛ@u4ۡE GH\UquܠVd /Kkc] 7b\l_ l Sٕ4;opT1]6"[BAu" Ġ%HAk3:MAq|SW9w+ Ư.g vN˞JGsto(y|H7b2q5F`Ov݃r)9 ~=cI K!I0?'5 Z߾md/ CsܺᲈgC.}gzM o%St%l[Ul Z >|ʉAVتbwp즭/8{>[QqїTL; F&/phՊgi=4B U"EOoyDٝ85|Z z6Lq(7LEwb'pݭr>~RVx||x-稓D+lSAAWދpe|۫(}`1U?+[AĠa)"wްG|JͮWۊ:P pŠ)C"bQnEXcMb|<掍7 jgX&*h ) k{[G_f rR]{=\7p Hb[MM.r^p w@9_rnߗʋ\,ɯ|ڊRv~!7uggG_kVG IcdpEQ҃|@ !IዓC[π*e~*e-uS/f(T^efteeZtFj?a_ * x4g-Մ]OyPcLufVi3/93ILbvp&G!C:kU瓤PIaͣ@zhdo9z?3ưa|l ~*Q ɪ<8}[(Uf]g CqcK}'_δ=?KFj[4+PĀֺf=Iu=?6WCk<(Sq>d>\/ 3m Yt~5($LUGƾfjgJ6a[7}ßLrսP2ҋN)Һt`u9|)Wz:)]; Ma|Y Ũ\|X[-dhO=xǿZ.Gh4pISaAhH00OE{'M^ NImOm>`,qq`܏Sgy *k?Dԭ]Ek-5>$ھ2>͒; \mT IXdį' E?/f1c{a>AqIV_2f293}N[k0m18AI3=s b'UKDvi״{iNs.i7zF~2PPq?Qw׫= w+yΛ,g&uUJGl*=A|sw' qUB0֬~\g׏)RIHD-ŋqT~%}OvDw0};_E vbЭ@Fk[^bݚ=g>e'X4V o$mpu^jAP%C^ f݌f~:+ntL|ev (3:?Qp 8DX+f`Ksk1qGҐ|nۓA]w ƺ[ Ub9e.C3$A|/ XJk*C~MUOy<d53>׌̋~N|5I[/x 0ƇFspޚ8%nG jKcMG6sQJ3@5oIO"0nf~8PT/=S`01/M;_UO3q+xRR.gL2W&gR},+sxDaAE Y&oB-9ɚشG[¦ G*.nυ4sH}!ݡriejv.L27 nmK]_lmjijDY`IYd_Z꽃f(~;;JM~Ll>BJXvR4B*ֹw U?\<`if}",A"0Q7$GA֚Acbu!qTwna2`&jQQe'&ߞw.a9 s8{B"lUJe"ݸ*+Be.T@WoGz.S]"O/l^̓ k]Sh^}g2SX 6z=ħf"<܂z|w DcMGξ.2 5[>-$Lsgwh%ۣλQڟ3+0(#+ֆB=h%;;xd#η;R7Sh <\o< |OsKR$1?O~`V˭N~fG?I;KqjdG2:}gMEGr/9@Ol-<4yӺzNr:5۴֞>\-] hL axkKD\NJȴ endstream endobj 584 0 obj << /Length1 1935 /Length2 13742 /Length3 0 /Length 14933 /Filter /FlateDecode >> stream xڍP !h]w'Xpw܂wwww9gfUVWu뱽ϳjR"eZA;#3-#@XVLKJbl Ktt_ha1tHXl\\ &9rD ]-Lt);[,*}PS99iL- m@ v@g)AclEOFghDghGIpp6(@rr6?T-2+ۙ::k cG 6@YR o+XHwdQdCcc;{C[ [35 /&CL05###O1AEᇾs2vwvsC!e>6YDh? GǮ{yVvn=Zؚ!Ş^()?633 tݍ(a=Cd 8;=_00v,laa?Nx?>?~?."v ٹ&WYJ:~̀ߛh$f4Qp66U1`@;'?.-#}LǥяCKۙ1]LlCGGC؏#@OƏ14z:[; ~0Ok8~q?@;viΘ;в&J׍vowtO=sɱ :2N0iyuGV`:%AE?Nijvq"D׫x3hi B.ڃ[{m_h~%4K4mj_,9lb(gZ|*KwۻRq԰ާQZLяs_U:?~::E)t,Y\ڿS@&e)B)¶utnN m7]ߑXU?͹ ThN2os0NP6 {XY\4ֳgWIS:+WQu5"u;U]"ibŰ)GSقH~eG,,0>+!)SDr_qFqx?bfA7[moRIly魍Lm}v&p }~G442N⳺A! $fwFBUwUlEX%t:0,<܎%6xy@ۓ4_~ں6w |7"mBzbS+j/q3"%_SbxAi>`f,&RJ/~kv,q!Ƴkɷ>!= !p[UsiN zvIt:Ws8rR/ elM9؂0Q lrGm .sry,fzi%W2鉓;aX"?ۓ^XA_]SS90Rf"v{TM'$0p |#tQbJ Z.wV*瞇 ȗ&0a™"4K=`YWH"5DFo%t mb)I~Pڣ XqRE:64VlsmH1]>Cg@ *hlU!f/}#pv-Ej}AlSIO\n4}o*FᔒˈV&8,iZ^1Cxf/bt]SX~D0ifuXYdHkmoyA = hNItCKL[R L+ )'"$ 5tmE큝ҼccCKHzz߈8{A#KUϮ:uR[6|t_UF\NIjwxSM BdǷL7n%a=1A[NZZyH=DKou'2*_tƼo!`$]@|D^>9S_ 8)?TH5FgRmᕚ|gm~ͭ:v<  5;u_NL@8 -A-ymH< SI3H,,:qqԂ`H2G?%v 7;Rcq2 IbE~g~} Ow*X}3>FsŒPV^@{/N<z_fngrEoox@{^h a0)qH]H[J v8a]y1(R_?ѕSύIL"*20 J(rb'DY=(^9xpm"@,ʊSV䧇"x*]$`4R^>[đs=5vffgP, @8$2;6l{NFR_MqV9Sň5Dl^nȷޟ7H{PKD5R(`n&2gNCCܴU>p[ 8!"h.߁o7t=\Xi+؞@Q\Zbޜy,1I?` l\L͠bk+3nTO[>smrU Qzݵ=H)G~^,Gy\d.[SՍpՄcӥ܅6QDIן6=gӂ!D O6l?wS>mWn$m0Ɵn.8#`64b\,T )WHF]` dj>£ȫ5ט|Diڙykd;o:$Ef/_PYǿCLp|Su/JS*7D)6$鄈CKH𫏗& ד=D@#Ku` cp;Os>(cSN snTFQ ߾g%ː3>woUq{؋7D vkPۇYO gX#~~6YWr;6vqU9! 
$1'_n, ![[ٵط,`&<uz/@sgDw,f7UÛ>(wbBÌTJ&4nI9jE69u1>mׂSog^XQ @hf\]RD^|R-oJ!={c)-c'7Eu5QmB%|,7pTY|'1 .8,Ph{"k6{ X%)2颽"@أ$&y@2Y3q>$MÐlL6q=F-jhL̂; #eƚ2ޙFQ7,\K_x/_M$Xs(3ۢi觗Ybԗ#$&^}d`vಀnH1)ӞA 5 #!jYD+HHV#ku&L#>=+ɔLUɳV6c^Z(E#oJOhuGh- k`a(uN=[^61B%'XͶnh'°ҺVL۬BO<"@ <q/ E Хh񾢑 Ş JjN:c1ЙG3DTV:7yT2CMIJD^ 5ڹx)0zc&SA%Z3<~2qE\No&@bڬ|+@MGouN”Pc_v}Ngme#"(E+ I%i\]X@H SQ->2-|/R8iy.V98{F(2QJl2) V{}$~< @/6?#Ў(YI|go "FS['?(c6~gd W8dIh)R'[]O!a]IoBˎ'g-~a\+GBu 8psrtf< W{ȷ(SoHQSW;-p1ִ"iA3ɋQvbT(5A( ޥa @ [~F= ʪu֋-t* > H%;"i'|>n,%/K"A5{t8di$GWvY)JQ2僙Z{ ONt4 R׹=E8)j*jzn0^QNo |%'A&jmW1OtH?gEpwͱ1"Q/X#X)0fYZ=1!'ε5 o?D&%]z*:}2bu_V3ꌂ΀7sgՠoh]]-9 P)k.|R0[ qxVڻZlgDGP U s7t~PԃXdHq:SYF?r 0 {Mcڬcl[g`EEQfO!xvAѷGq19< lUpw+µ6JQQ,4Yy0Xv{O"~|Qj%]Jl!?%ר,-" dѷ袹6@NX6=jNEw} TŽNlHӬMׂ9^"^ o^UL ,{#CI)ֻ Q+YQ qR YrO]l]رػOmm5OZ8Ɋ@񿯙v(KH]X: #$77>A_fI}휀:jx{ʓqo&MGW,(tAU2;B)}~ D鎘S+X5k 􂓤p[!Cc']g6OclvFՠVvH"[OFG2VX=ҲxA<6͒78+GkOb]]cSW"hAnaŒg[9ux,IJ#y_a¡oetzCk 3boy noU٬.ß˖8Vwx"KJ^5jɁcG)~HC>'n.QYX/ &nӄ=oAtg)N.9?u#|NB1^]kW,̼DB/qۤ1`O) 4I9_h[҂EV6 ǃ :̒dAĔk%vJ~8uKv(ނo-;*!m.>\8N켴≄Ȉ-;HǹST:“mÂ]uATz'?-1[R3e/sO(<'e[C]ZYghjTs !NIYP-y;R'Tڞ9 ^;ꕌTnAjya#g{~*`p=< nH ϽX^iaoU:?勢l Df.wGLl8 :I 3\[ߣ,FXvAq ӵ!ivuV8[YmjZ#?M'̮G _EʊlӇKcmJTML]w*[6kuAJ8E'7S68TE ez%(%^,{I76Na[O5 bH:4$XƯ1 oZY`^nu(/ V zQD@,/c"rJ SDڱl8Ui7Q7dr^q1b_L3S5j%jf% V!}TMaXnIP)‹U UCo-BΙta @+x} /O=a,p4En86"vKZRoQG6gWU*"]02р3ig\Hv@,CwhU%ox`jJzv +m MC>}p'!:樐Z.&ySI*?C/w<ڵ_ymc\1 &|0T/!;>QWID|cXF d6:{-2-0t=*k|Ԣ!7)Ca#<݉o]{ 1d=t 0N\6[sV0ȯ"*#Ef<2GȮ5t(7mg&KX+PS^OB^f E!Uwlvl7lHFIjZyLSmmhjNꪯ08Ye$uv4eu-HH3+NX!=}ۙ*Un KQ!VnSMuCCL; .>->ghfu`ՃSbPZ<8*E+d;!:U4!T{%[?q&xg;!wkorfDM mn U0V#(!+RDDqG2zp/z+CD4>zU?y t(on07q)AF[fvZW`/jMP5 =&TmAG~ǁ-T+Y sODf#RGo^WJLjFQJvqktM_9Wl=jVl|s%Yc]@RD+%WE. 籜GlToB7v|^ i9ost}}~ ioel5./BZ:!2UVAXMH ]OIE9ײGgf|,TWDzfϕ֗NH`u0ź!aP&%95%zy8W8 ݼ4 |~%_(aۨOU/'iCEºOGgl.h"RvjdڬǃRHsT>a)j%wXT4?곏^za؋R445M:^Fǿ;`lTQ9 4Q{0|>!ztO f}{5An#l%$cg|%V"uZrǸW)‹6}fϰhXG4Br$8+[؜^(*m#758B>xvdDMK(V1ҷ/6Q['}%w̸AZ#L&͡7wf)VW.L3`cӯ52[[ʕe@,+8c5ƭWYfMV>~r#bd3Ɖ!ۡϚXR@zH]\đ Kn)Hf❀byLy06P492#gZ@Lj n2>^[\"sZ o7*J•/߲]4H?yTZS!(D&0A*p!,-g5d0NWy_"FW Dp:1@!p|==z ZХtͻpfBAsֺ~yZKQd4ekq|kPA$ґj9W$=Rh 3+ Zu`_>Z/X/|!޼gG]%P&J;C֌BS :{n cIKL 5 lf<" S+x61/2Tfbrf3x$$uD~P\Y~^;llGz?Db9\_Z]^zj;xt(QvA&W)#CZ ȿsWJ\GlOji]rdnJɲ|;"휳-U,m5 Hʼ\D/=~xCW|f6+^:SsSkGP>FaB|ۜ'4KI2-">xӓ\ ^{fz x8), ̊orh# F GV5bITҢ9gn,`W㯰%hўOfqselUb, عH}LJtyZCAsȟUI/qhf!M4=)x9& #44^J7BZ[I͟itnC**+Iegw۩;Zɳc3|VdJɱޑ5ԻPnһpgMaYEȠ$$Ïqi\l%Vcf *NY.CDmמTLj@ܧְix̹)DL dUiYHjZLQ e l:3M<+-8=wև-?x%o!qZNQ!k 0@JdJyMo rT7&NiO3 q%S=Jy)x;vnUw%xDK/LP*%d\*42l=ɓηUe)Ѝ^<խȵ(eϋGbi^(3OmªtyKp_,Zկ 7h.w(n7!uzkd`'PQ9426sd$S>id:_$D>"^#+l .E3AYP,GvjGZKp*i65[6^Ko)q:Q>>9 D/@8'Χ#/Z,6{X.z_Yrx#QdZy!lzUEyB(YZX/q8ٌ2b6p,T57aᲺqQ"(MI깎N'M svEH)[$|uA;|k\ ua޹mtC݂K"ߨNb9ٔf$` )` bMvMx KOE_)˜z;"&bkdYa^C“}QJтo'oLڵ [ F_02 Fx,Nji|6WJ:"Mmvov f?Iv/3vI ߑUw}JϤM-F]}V.ȏ5Kslg>*Ɗ)JIq b߸V|NdkFaX gQN|E^*=ESS8h۴3 yY*:Hp}&-C f3FlˍM*) +->NMfC$XKTy\6CI>__tɫou,Z2xꕧ6IJ~OFy_hLX%{ƧD]G*F23ahY;ÈR]\Y ~~-^|stu5BKqaԩUm̱e{MB&舞| tcQ'".@2Z٭qd6r,,TP`΋9r4j4b,vlBvUYHB.F#J#@<Ng:aL_/ĉK/ Acu;J^8HTUO[i7K>.ocb|i: [om`ӷ'~~&4r_?V>Y Wg2+s7FBZv7y94M]:v ']QSTI܇5eC.S9VƆj>ysIIh]:lx;vwotuר}Ս!fǻxiyսCMM%yۋB ZO et[FT_:NzFt{Y83)&#́Z ~iJޞߺ>)Cݚ"cf#B];HQȔ;[؉]H*\U榓sa}7ŷv_KeK~_՛KNg j~\ A/pT\P+kCyCyJ"I(y ON|:c-˛boKf/6K~G[IZb͆G~q\~Q-I#j5-qBq&yp5I}sU~TuN(vCzT,ɲRǁYsH5k+5 i^cN@fw?70Jpks2 Y'ExXy2s\ C'D8 9vϐH ^j1EBݕmH(S$TU08G[KqNn0m8'"jZ5TpCHJTnčXZxҲvxY2l:'7 B,RXew_y^I4cR+ ׼Ls -C Jl TR MG槌%4aP.9E!FȊon\s`l1vlnd(-z i"ڟI̚vER=BF ߀81{p>7e㍟W9 +Ӻ -EY7R–n8Hih6> stream xڍP n% =`%Hp-|T<{^{w阮XAN(ncg*01YU@Հv +0"@}7Q r0ع8̌\!ZrDAFYzh@.beb 21G!!K-P շ7Ze4Է([.ޚɉ^Ҏք7(퀶@#r#TLAv9m7{[6޲%e@_d7Dp#b}CC+Kk} l0Y2}D} ; }7Ÿąo ggh Y0o,6Od 
4|wlv2a`͠ 8%E漙l\ lhGkN?o<ܬo2 c#`opdh0D3oo r|e|k?&>iuG *EUou [9ؙtl&&fۃQ]ǿJ\O){@Ks#vs.#kߊ,,SE[,\fuZRՁ,`o 6yh:.zV ;q3HdohWeWc,@` @|o3fhvؽu.oZ1зwAx;7pczJ# `[ٿ-I["ql?L"fA"NF?_q XYC5ѿ |Y7vi/F1|Ko п[qb-o|+_-["Av[^z/O?k`kv9o : f yͪ[*>:2"^SsS὿bj _G?b.x7f4YsO ^i66آuG{8q`ޕKKld}֠t-8ECks3r s1=hX,.'' PJkpt$ݥI }b3fb-h#!u!~';8`|S 1߇nXY'Ur{R4UDD :f yivmٞ(Y[> FK%p7M+fAWP,gM8Z~L}&JF'GdK @?f𡐿3ͧh~;jYEݸZ+8 t ^;,༐Au9ȉruh,dT߉;lUg\ꪵ!ѭ|N|+i"! rFFlխ`ec+ 2hZ_2SzD=,|qtS7qw;]R69 e wdqpHRRi`9 Q,6blFf5fU6L}<ç$(,5JJЗ~5>r7P}81/*waYfVT.Jza¸\WNJ_ ]zC $.8_Enޏha}]-'O@LfV2=k+hQ|w>yz\!aUaQ }ߴ1$b jڂ#Ն퓶j鉛*(X#R((+DĪ۪n&'+R<:^.h<*Ù9>n1'g^z"g^##lˡDQNYޟ s' h%N!hmgTRfbSOr ėNuiڢa-pHl/b$ADCjXsy VIQEYW'։y}_ ϒy &V.96'7է-0,j.)]:ە;_-X࣍Jwa1&DzK3$?/#d/>j-h gY`۱^? ~ [S>gv bPo6#)I z_j l=*d"ŀ!G-ϐ99bl& T5ƅaFS W)Cd~苗%c_vMKzL0G֖s5tFr4;viG;PQGGs/BΔV0gM *ճJuj1ہב /.-ki]Qcw.u'ڞ>OxUŸr&=N|{c:)F$GSGy۳ᲺD٢Cro^Kqu"44s괐bzQF2G7q~[US|h~ʳ8HRhn|&:3&φ?ND#5oh.zxa&&D֟R~!) d+A䇢, ~1M(ں{/DLg"O7L~φ#g֍1|!2&B#Vto 2,̏M5ca3I[vsT/KqL<*}(1?GzDф(&[5S'b緽wdt>"FZ)i\fSK+TqHO5PW3*t,m80\* j# 6 | p~aSgs6zm|0p"XZWqk2/E? i]- "&ǣXRT<\ecWƪ 4\z/wcقbX[֛72QH *i<TRFUr&Zt5ghԼ?븑4+_*g%-^$םu*m`>5%R6 s̞XK"_EmGJh  'g1v[5W~5ēs!/N47~"u+}r|v,NcR5O<'Z87rwY%4)\kZ!#liIT+h_>JbYwW_v~~]Nl}BŜo,(RٵSlcۉm)y5*R`4c?]>yR f`.c/"՞r=~qv+p ;(ģ9o8Dͱ]Ӱߤ_'= '!?B.5ݥ1l\pCZ4aMeϩML*RG L_⣉.ĸh9he$w,">diG;i˽1k{{'B#i>!.UhۈѠ*k [6K=H5Í\/DMl]"xMF*Z) CK^}ycQUXN i\-97́u1 !Q5hoٟfq_>oSʬVtp;8fa{\Y/m\8]8v|K,D>'2OyWYWeξc^ %ּ"ej'I K6*Cɽo=3Nіg-y">K@,"w!̠ʆ\x1B}SRsߡ8#C<|^g-%5FR2;҈j!SeNR/ggPDnHM^.]/`EyU\3E]GsL vXQc $h! ," `]8,V^XK.s.X[HcC])`6TIjΘb˅&MMh,1R6iAʹ?Ivar -[l˘3<%:{G+Gp:[bfs$+tR0Ly=ٯ" Κ)}?.vϞRB#~]Bs~" H /" Ԉ'lZ~[p 'g֚wNr4rG4(jkE,$ul"n|FO .%pbsW!3U~+{N(8$qՉ_ƵN;!9V&ܾu. Dp7Xbc9rmzpX#fiwOψc^g"VHQa{dWػoE6&3;4*HYll_c-5j@&a}X kex&wy3N_}ba{riPGyggx :] ڊNwnRBF4( &^ܑ%,cwh;nN#oμA8_> iEf$6c+.Lڑ|޿1|/fp읪= !k4nuwOejAaD|dlr/GmZX x\h12g?s]qWQ۵5NB}08?CZHǎ j] }Z= ؼWp Аed[9 ;OC؟/gMOt\P^2QtդkeLw9T@q=SQS@_nYm?% !VώtkĘ:Y.(~ yڽB4"aDZsrL {Y^1e4SoDXcB'OK}SZnJ>"=v8 ;]5X,čO #/D6&X{ζ x"<94.MDf% Be@73Fl*/Ց,/I-D^w(5 `\8fbntI}Y,Mzoqp,P/=^;1ȌCC!o>&x;Xp@Ǭ;]܃MZ<C PW ;LFluS쿳isbh3@\GtA׸3iSM/Id8) ;Ẏÿfjbd?"Xwc"ۂbSx]UTH\qU?s5MjCpL NXx~CnJZpRtcUH~#nc@lJt,!1׬ƽ;h 'q%P5DRF.&Y0D5[T6>*߄._1) gVJhۿ=FB#d{;XsW)g6IC:#=6:V*Y40s.LlBj>^I;sx7m~_] ^qrr*{0A%eͭBT| Du9/ҙ9p7p] 3k}Ѣh15B?^|fs3]AU C7Pu8SM SitGKSB޲ AQ䐙kyDPOE <|o ֙%{{8:-[΢NȪpr]^hYA `ȱQ}C0~-^ |2kG|I:^5\fw&B *BuuyS n&1h"&K5I#4icВ {<ǗqF@k}2AgXαKS@ēk',H$3#p?E9u@(V*`dq[¾cu;Ne#<#E>BF GE. /_\tB[M)'U i0н'|dmcQھ3:$g6 /a#,G?- 4݂R4 ?A6tg1h;$ħw(lo~AIpM=X,29y伺d[Ÿ: Wj;76ÍhRs%]{y %4RTGR y o!409.v\Ld Y|/T I7g޳5E7{P &Sh0i̱ʇw9b-(!rЇ  ~U# q 9ay\c 9C/(t'p㍖y['4g977})G NucI\6w D̎9Hb^3{4!_(jw[eRoRC{7Z>շ-Te?qzowİySc=7s<.f1\$¶옲){IC}]Q}i:Hb y=Si=OT|C BΪ+Qx!,SVW>xkL*ãc|Im4{-7!Ћ&yJ>墣,1zu~/fchw͜TKXԨL·p3x|nmqD# DZ`TԇC5.ҦEJ)w >cx=]H%i}ϫ0, W䕤CL|ӮYmzO(䰤`dIAC"C^LPckf֌RRSJ_TTO/ `Εf*D sf+`x=@q[=h B+d,NJ_ڇ>}&+(8;dIqOcd{LÚŜe>YI =~g۱ :bN(P-5sM"}udKFK e=`Ǹ?ZQ2hp]Ќ%18"jQB-)BVe0P(s8t]GR@vo>b<)1"ҍ$R*9hţ>1zSX))GO? ڰ;bQc0@_&|4A{7d`}tLEPG"{ԁa*Z51/55?<2͙.NX| x57AᳺDO1z1uYLMj1Ҩ~_ٽP~:?%j[g+ʧ O ZHll&xƖ} XF':3jo]q(xġxu;\ Z,u0D*lKǷ@+ּ M\t9bݯG6lDzqb%h_:V۩I>ۭ"etmG-MuWzz t=EnæܪL rfrw,;*tT0ƒi0XFM%G4>c+u8'ͮYlS<&s|Xښ_XFPgJEՒoWv#?+–][bgcq,_>e@K%u׌]tO+.RT01cwESS _8M(يFh~85"o8N+=Vgmk=(ScH_Ԉ <)DJhwj=Bn/( X\ [UG U]k͈Z== 7}ZqL=@ԹeC"O:ťFaiJ( '*?LRTq@+3b>cߴE]X~cv @EM$\t-uTP1e%*LB1-JԠx̾9ew_xpNSZngYN:`JBer7-+l99aZrc ehqZI ~P? $ci(j]N?ш!J2]%tUca[#'K}bFC}f,4ٳPՓE'=y$ ip*c{t?HARލė剉a|Tzs@|p@x.$g .n t;,X2IF碍w}e6sG _ϙ)4qiWB5rB /a.zٗ O]xTY>?љfioNM4wӆ/TĠ;TaéM6}fJt[eEEIOC /׎ æ+% 6"ֺQJ%ia5{Ost5΁G"_~VE ?K8ERZ2ZcNUBhWI D&'}{mBgό]5ܹцA:I͚%N)Y']HĵJeY7Qy0nVt(m8KO{Rg4S,c ^. 
[binary PDF stream data omitted: compiled vignette "kernlab - An S4 Package
for Kernel Methods in R" by Alexandros Karatzoglou, Alex Smola and Kurt
Hornik; produced with pdfTeX 1.40.20, dated 2019-11-12]
kernlab/inst/doc/kernlab.R0000644000175100001440000001050513562451344015201 0ustar hornikusers### R code from vignette source 'kernlab.Rnw'

###################################################
### code chunk number 1: preliminaries
###################################################
library(kernlab)
options(width = 70)


###################################################
### code chunk number 2: rbf1
###################################################
## create a RBF kernel function with sigma hyper-parameter 0.05
rbf <- rbfdot(sigma = 0.05)
rbf

## create two random feature vectors
x <- rnorm(10)
y <- rnorm(10)

## compute dot product between x,y
rbf(x, y)


###################################################
### code chunk number 3: kernelMatrix
###################################################
## create a polynomial kernel function of degree 2
poly <- polydot(degree=2)

## create artificial data set
x <- matrix(rnorm(60), 6, 10)
y <- matrix(rnorm(40), 4, 10)

## compute kernel matrix
kx <- kernelMatrix(poly, x)
kxy <- kernelMatrix(poly, x, y)


###################################################
### code chunk number 4: ksvm
###################################################
## simple example using the promotergene data set
data(promotergene)

## create test and training set
tindex <- sample(1:dim(promotergene)[1],5)
genetrain <- promotergene[-tindex, ]
genetest <- promotergene[tindex,]

## train a support vector machine
gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE)
gene
predict(gene, genetest)
predict(gene, genetest, type="probabilities")


###################################################
### code chunk number 5: kernlab.Rnw:629-635
###################################################
set.seed(123)
x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2))
y <-
matrix(c(rep(1,60),rep(-1,60)))
svp <- ksvm(x,y,type="C-svc")
plot(svp,data=x)


###################################################
### code chunk number 6: rvm
###################################################
x <- seq(-20, 20, 0.5)
y <- sin(x)/x + rnorm(81, sd = 0.03)
y[41] <- 1


###################################################
### code chunk number 7: rvm2
###################################################
rvmm <- rvm(x, y,kernel="rbfdot",kpar=list(sigma=0.1))
rvmm
ytest <- predict(rvmm, x)


###################################################
### code chunk number 8: kernlab.Rnw:686-689
###################################################
plot(x, y, cex=0.5)
lines(x, ytest, col = "red")
points(x[RVindex(rvmm)],y[RVindex(rvmm)],pch=21)


###################################################
### code chunk number 9: ranking
###################################################
data(spirals)
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,]
ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
c<-1:86
op <- par(mfrow = c(1, 2),pty="s")
plot(ran)
plot(ran, cex=c[ranked[,3]]/40)


###################################################
### code chunk number 10: onlearn
###################################################
## create toy data set
x <- rbind(matrix(rnorm(90),,2),matrix(rnorm(90)+3,,2))
y <- matrix(c(rep(1,45),rep(-1,45)),,1)

## initialize onlearn object
on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),type="classification")
ind <- sample(1:90,90)
## learn one data point at the time
for(i in ind)
on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1)
sign(predict(on,x))


###################################################
### code chunk number 11: kernlab.Rnw:894-897
###################################################
data(spirals)
sc <- specc(spirals, centers=2)
plot(spirals, pch=(23 - 2*sc))


###################################################
### code chunk number 12: kpca
###################################################
data(spam)
train <- sample(1:dim(spam)[1],400)
kpc <- kpca(~.,data=spam[train,-58],kernel="rbfdot",kpar=list(sigma=0.001),features=2)
kpcv <- pcv(kpc)
plot(rotated(kpc),col=as.integer(spam[train,58]),xlab="1st Principal Component",ylab="2nd Principal Component")


###################################################
### code chunk number 13: kfa
###################################################
data(promotergene)
f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot",kpar=list(sigma=0.013))
plot(predict(f,promotergene),col=as.numeric(promotergene[,1]),xlab="1st Feature",ylab="2nd Feature")
kernlab/inst/doc/kernlab.Rnw0000644000175100001440000014230512055335060015542 0ustar hornikusers\documentclass{A}

\usepackage{amsfonts,thumbpdf,alltt}
\newenvironment{smallverbatim}{\small\verbatim}{\endverbatim}
\newenvironment{smallexample}{\begin{alltt}\small}{\end{alltt}}
\SweaveOpts{engine=R,eps=FALSE}

%\VignetteIndexEntry{kernlab - An S4 Package for Kernel Methods in R}
%\VignetteDepends{kernlab}
%\VignetteKeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R}
%\VignettePackage{kernlab}

<<preliminaries>>=
library(kernlab)
options(width = 70)
@

\title{\pkg{kernlab} -- An \proglang{S4} Package for Kernel Methods in \proglang{R}}
\Plaintitle{kernlab - An S4 Package for Kernel Methods in R}

\author{Alexandros Karatzoglou\\Technische Universit\"at Wien
  \And Alex Smola\\Australian National University, NICTA
  \And Kurt Hornik\\Wirtschaftsuniversit\"at Wien }
\Plainauthor{Alexandros
Karatzoglou, Alex Smola, Kurt Hornik}

\Abstract{
  \pkg{kernlab} is an extensible package for kernel-based machine
  learning methods in \proglang{R}. It takes advantage of
  \proglang{R}'s new \proglang{S4} object model and provides a framework
  for creating and using kernel-based algorithms. The package contains
  dot product primitives (kernels), implementations of support vector
  machines and the relevance vector machine, Gaussian processes, a
  ranking algorithm, kernel PCA, kernel CCA, kernel feature analysis,
  online kernel methods and a spectral clustering algorithm. Moreover it
  provides a general purpose quadratic programming solver, and an
  incomplete Cholesky decomposition method.
}

\Keywords{kernel methods, support vector machines, quadratic
  programming, ranking, clustering, \proglang{S4}, \proglang{R}}
\Plainkeywords{kernel methods, support vector machines, quadratic
  programming, ranking, clustering, S4, R}

\begin{document}

\section{Introduction}

Machine learning is all about extracting structure from data, but it is
often difficult to solve problems like classification, regression and
clustering in the space in which the underlying observations have been
made.

Kernel-based learning methods use an implicit mapping of the input data
into a high dimensional feature space defined by a kernel function,
i.e., a function returning the inner product $\langle \Phi(x),\Phi(y)
\rangle$ between the images of two data points $x, y$ in the feature
space. The learning then takes place in the feature space, provided the
learning algorithm can be entirely rewritten so that the data points
only appear inside dot products with other points. This is often
referred to as the ``kernel trick''
\citep{kernlab:Schoelkopf+Smola:2002}. More precisely, if a projection
$\Phi: X \rightarrow H$ is used, the dot product
$\langle\Phi(x),\Phi(y)\rangle$ can be represented by a kernel
function~$k$
\begin{equation} \label{eq:kernel}
  k(x,y)= \langle \Phi(x),\Phi(y) \rangle,
\end{equation}
which is computationally simpler than explicitly projecting $x$ and $y$
into the feature space~$H$.

One interesting property of kernel-based systems is that, once a valid
kernel function has been selected, one can practically work in spaces of
any dimension without paying any computational cost, since feature
mapping is never effectively performed. In fact, one does not even need
to know which features are being used.

Another advantage is that one can design and use a kernel for a
particular problem that can be applied directly to the data without the
need for a feature extraction process. This is particularly important in
problems where a lot of structure of the data is lost by the feature
extraction process (e.g., text processing). The inherent modularity of
kernel-based learning methods allows one to use any valid kernel on a
kernel-based algorithm.

\subsection{Software review}

The most prominent kernel-based learning algorithm is without doubt the
support vector machine (SVM), so the existence of many support vector
machine packages comes as little surprise.
Most of the existing SVM software is written in \proglang{C} or
\proglang{C++}, e.g.\ the award winning
\pkg{libsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/libsvm/}}
\citep{kernlab:Chang+Lin:2001},
\pkg{SVMlight}\footnote{\url{http://svmlight.joachims.org}}
\citep{kernlab:joachim:1999},
\pkg{SVMTorch}\footnote{\url{http://www.torch.ch}}, Royal Holloway
Support Vector Machines\footnote{\url{http://svm.dcs.rhbnc.ac.uk}},
\pkg{mySVM}\footnote{\url{http://www-ai.cs.uni-dortmund.de/SOFTWARE/MYSVM/index.eng.html}},
and \pkg{M-SVM}\footnote{\url{http://www.loria.fr/~guermeur/}}, with
many packages providing interfaces to \proglang{MATLAB} (such as
\pkg{libsvm}), and even some native \proglang{MATLAB} toolboxes\footnote{
\url{http://www.isis.ecs.soton.ac.uk/resources/svminfo/}}\,\footnote{
\url{http://asi.insa-rouen.fr/~arakotom/toolbox/index}}\,\footnote{
\url{http://www.cis.tugraz.at/igi/aschwaig/software.html}}.

Putting SVM specific software aside and considering the abundance of
other kernel-based algorithms published nowadays, there is little
software available implementing a wider range of kernel methods. Some
exceptions are the
\pkg{Spider}\footnote{\url{http://www.kyb.tuebingen.mpg.de/bs/people/spider/}}
software, which provides a \proglang{MATLAB} interface to various
\proglang{C}/\proglang{C++} SVM libraries and \proglang{MATLAB}
implementations of various kernel-based algorithms, \pkg{Torch}
\footnote{\url{http://www.torch.ch}}, which also includes more
traditional machine learning algorithms, and the occasional
\proglang{MATLAB} or \proglang{C} program found on a personal web page
where an author includes code from a published paper.

\subsection[R software]{\proglang{R} software}

The \proglang{R} package \pkg{e1071} offers an interface to the award
winning \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, a very efficient
SVM implementation. \pkg{libsvm} provides a robust and fast SVM
implementation and produces state of the art results on most
classification and regression problems
\citep{kernlab:Meyer+Leisch+Hornik:2003}. The \proglang{R} interface
provided in \pkg{e1071} adds all standard \proglang{R} functionality
like object orientation and formula interfaces to \pkg{libsvm}. Another
SVM related \proglang{R} package which was recently made available is
\pkg{klaR} \citep{kernlab:Roever:2004}, which includes an interface to
\pkg{SVMlight}, a popular SVM implementation, along with other
classification tools like Regularized Discriminant Analysis.

However, most of the \pkg{libsvm} and \pkg{klaR} SVM code is in
\proglang{C++}. Therefore, if one would like to extend or enhance the
code with e.g.\ new kernels or different optimizers, one would have to
modify the core \proglang{C++} code.

\section[kernlab]{\pkg{kernlab}}

\pkg{kernlab} aims to provide the \proglang{R} user with basic kernel
functionality (e.g., like computing a kernel matrix using a particular
kernel), along with some utility functions commonly used in
kernel-based methods like a quadratic programming solver, and modern
kernel-based algorithms based on the functionality that the package
provides.

Taking advantage of the inherent modularity of kernel-based methods,
\pkg{kernlab} aims to allow the user to switch between kernels on an
existing algorithm and even create and use their own kernel functions
for the kernel methods provided in the package.
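As a small illustration of this modularity, the following sketch (not
run here) fits the same support vector machine with two different
kernels simply by changing the \code{kernel} argument; the data matrix
\code{x} and the response vector \code{y} are placeholders assumed to
exist:

\begin{smallexample}
## sketch: the same SVM algorithm with two different kernels
svm1 <- ksvm(x, y, kernel = "rbfdot")    # Gaussian RBF kernel
svm2 <- ksvm(x, y, kernel = "polydot",   # polynomial kernel
             kpar = list(degree = 2))
\end{smallexample}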
\subsection[S4 objects]{\proglang{S4} objects}

\pkg{kernlab} uses \proglang{R}'s new object model described in
``Programming with Data'' \citep{kernlab:Chambers:1998} which is known
as the \proglang{S4} class system and is implemented in the
\pkg{methods} package.

In contrast with the older \proglang{S3} model for objects in
\proglang{R}, classes, slots, and methods relationships must be declared
explicitly when using the \proglang{S4} system. The number and types of
slots in an instance of a class have to be established at the time the
class is defined. The objects from the class are validated against this
definition and have to comply with it at any time. \proglang{S4} also
requires formal declarations of methods, unlike the informal system of
using function names to identify a certain method in \proglang{S3}.

An \proglang{S4} method is declared by a call to \code{setMethod} along
with the name and a ``signature'' of the arguments. The signature is
used to identify the classes of one or more arguments of the method.
Generic functions can be declared using the \code{setGeneric} function.
Although such formal declarations require package authors to be more
disciplined than when using the informal \proglang{S3} classes, they
provide assurance that each object in a class has the required slots and
that the names and classes of data in the slots are consistent.

An example of a class used in \pkg{kernlab} is shown below. Typically,
in a return object we want to include information on the result of the
method along with additional information and parameters. Usually
\pkg{kernlab}'s classes include slots for the kernel function used and
the results and additional useful information.

\begin{smallexample}
setClass("specc",
          representation("vector", # the vector containing the cluster
           centers="matrix",       # the cluster centers
           size="vector",          # size of each cluster
           kernelf="function",     # kernel function used
           withinss = "vector"),   # within cluster sum of squares
          prototype = structure(.Data = vector(),
                                centers = matrix(),
                                size = matrix(),
                                kernelf = ls,
                                withinss = vector()))
\end{smallexample}

Accessor and assignment functions are defined and used to access the
content of each slot, which can also be accessed with the \verb|@|
operator.

\subsection{Namespace}

Namespaces were introduced in \proglang{R} 1.7.0 and provide a means for
packages to control the way global variables and methods are being made
available. Due to the number of assignment and accessor functions
involved, a namespace is used to control the methods which are being
made visible outside the package. Since \proglang{S4} methods are being
used, the \pkg{kernlab} namespace also imports methods and variables
from the \pkg{methods} package.

\subsection{Data}

The \pkg{kernlab} package also includes data sets which will be used to
illustrate the methods included in the package. The \code{spam} data set
\citep{kernlab:Hastie:2001}, collected at Hewlett-Packard Labs, contains
data on 2788 and 1813 e-mails classified as non-spam and spam,
respectively. The 57 variables of each data vector indicate the
frequency of certain words and characters in the e-mail.

Another data set included in \pkg{kernlab}, the \code{income} data set
\citep{kernlab:Hastie:2001}, is taken from a marketing survey in the San
Francisco Bay area concerning the income of shopping mall customers. It
consists of 14 demographic attributes (nominal and ordinal variables)
including the income, and 8993 observations.
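All data sets described in this section ship with the package and can be
attached with a call to \code{data()}, for instance (output not shown;
the class column of \code{spam} is the 58th, named \code{type}):

\begin{smallexample}
data(spam)        ## attach the spam data set
dim(spam)         ## 4601 e-mails, 57 features plus the class label
table(spam$type)  ## 2788 non-spam, 1813 spam
\end{smallexample}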
The \code{ticdata} data set \citep{kernlab:Putten:2000} was used in the 2000 CoIL Challenge and contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why?

The \code{promotergene} data set contains E.~coli promoter gene sequences (DNA), with 106 observations and 58 variables, and is available at the UCI Machine Learning repository. Promoters have a region where a protein (RNA polymerase) must make contact, and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters.

The \code{spirals} data set was created by the \code{mlbench.spirals} function in the \pkg{mlbench} package \citep{kernlab:Leisch+Dimitriadou}. This two-dimensional data set with 300 data points consists of two spirals where Gaussian noise is added to each data point.

\subsection{Kernels}

A kernel function~$k$ calculates the inner product $\langle \Phi(x), \Phi(x') \rangle$ of two vectors $x$, $x'$ for a given feature mapping $\Phi: X \rightarrow H$. The notion of a kernel is obviously central in the making of any kernel-based algorithm and consequently also in any software package containing kernel-based methods.

Kernels in \pkg{kernlab} are \proglang{S4} objects of class \code{kernel} extending the \code{function} class with one additional slot containing a list with the kernel hyper-parameters. Package \pkg{kernlab} includes 7 different kernel classes which all contain the class \code{kernel} and are used to implement the existing kernels. These classes are used in the function dispatch mechanism of the kernel utility functions described below. Existing kernel functions are initialized by ``creator'' functions. All kernel functions take two feature vectors as parameters and return the scalar dot product of the vectors. An example of the functionality of a kernel in \pkg{kernlab}:

<<>>=
## create a RBF kernel function with sigma hyper-parameter 0.05
rbf <- rbfdot(sigma = 0.05)
rbf
## create two random feature vectors
x <- rnorm(10)
y <- rnorm(10)
## compute dot product between x,y
rbf(x, y)
@

The package includes implementations of the following kernels:

\begin{itemize}
\item the linear \code{vanilladot} kernel implements the simplest of all kernel functions
\begin{equation}
k(x,x') = \langle x, x' \rangle
\end{equation}
which is useful especially when dealing with large sparse data vectors~$x$, as is usually the case in text categorization.

\item the Gaussian radial basis function \code{rbfdot}
\begin{equation}
k(x,x') = \exp(-\sigma \|x - x'\|^2)
\end{equation}
which is a general purpose kernel and is typically used when no further prior knowledge is available about the data.

\item the polynomial kernel \code{polydot}
\begin{equation}
k(x, x') = \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)^\mathrm{degree}
\end{equation}
which is used in the classification of images.

\item the hyperbolic tangent kernel \code{tanhdot}
\begin{equation}
k(x, x') = \tanh \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)
\end{equation}
which is mainly used as a proxy for neural networks.
\item the Bessel function of the first kind kernel \code{besseldot}
\begin{equation}
k(x, x') = \frac{\mathrm{Bessel}_{(\nu+1)}^n(\sigma \|x - x'\|)}
{(\|x-x'\|)^{-n(\nu+1)}}
\end{equation}
which is a general purpose kernel, typically used when no further prior knowledge is available, and mainly popular in the Gaussian process community.

\item the Laplace radial basis kernel \code{laplacedot}
\begin{equation}
k(x, x') = \exp(-\sigma \|x - x'\|)
\end{equation}
which is a general purpose kernel and is typically used when no further prior knowledge is available.

\item the ANOVA radial basis kernel \code{anovadot}, which performs well in multidimensional regression problems
\begin{equation}
k(x, x') = \left(\sum_{k=1}^{n}\exp(-\sigma(x^k-{x'}^k)^2)\right)^{d}
\end{equation}
where $x^k$ is the $k$th component of $x$.
\end{itemize}

\subsection{Kernel utility methods}

The package also includes methods for computing commonly used kernel expressions (e.g., the Gram matrix). These methods are written in such a way that they take functions (i.e., kernels) and matrices (i.e., vectors of patterns) as arguments. These can be either the kernel functions already included in \pkg{kernlab} or any other function implementing a valid dot product (taking two vector arguments and returning a scalar). In case one of the already implemented kernels is used, the function calls a vectorized implementation of the corresponding function. Moreover, in the case of symmetric matrices (e.g., the dot product matrix of a Support Vector Machine) they only require one argument rather than having to pass the same matrix twice (for rows and columns).

The computations for the kernels already available in the package are vectorized whenever possible, which guarantees good performance and acceptable memory requirements. Users can define their own kernel by creating a function which takes two vectors as arguments (the data points) and returns a scalar (the dot product). This function can then be passed as an argument to the kernel utility methods. For a user-defined kernel the dispatch mechanism calls a generic method implementation which calculates the expression by passing the kernel function through a pair of \code{for} loops. The kernel methods included are:

\begin{description}
\item[\code{kernelMatrix}] This is the most commonly used function. It computes the matrix $K$ where $K_{ij} = k(x_i, x_j)$ and each data point $x_i$ is a \emph{row} of the data matrix. In particular,
\begin{verbatim}
K <- kernelMatrix(kernel, x)
\end{verbatim}
computes the matrix $K_{ij} = k(x_i, x_j)$ where the $x_i$ are the rows of $x$ and
\begin{verbatim}
K <- kernelMatrix(kernel, x1, x2)
\end{verbatim}
computes the matrix $K_{ij} = k(x1_i, x2_j)$.

\item[\code{kernelFast}] This method differs from \code{kernelMatrix} for the \code{rbfdot}, \code{besseldot}, and \code{laplacedot} kernels, which are all RBF kernels. It is identical to \code{kernelMatrix}, except that it also requires the squared norms of the first argument as additional input. It is mainly used in kernel algorithms where columns of the kernel matrix are computed per invocation. In these cases, evaluating the norm of each column entry over and over again, as is done on a \code{kernelMatrix} invocation on an RBF kernel, would cause significant computational overhead. It is invoked via
\begin{verbatim}
K <- kernelFast(kernel, x1, x2, a)
\end{verbatim}
Here $a$ is a vector containing the squared norms of the rows of \code{x1}.

\item[\code{kernelMult}] is a convenient way of computing kernel expansions.
It returns the vector $f = (f(x_1), \dots, f(x_m))$ where
\begin{equation}
f(x_i) = \sum_{j=1}^{m} k(x_i, x_j) \alpha_j, \mbox{~hence~} f = K \alpha.
\end{equation}
The need for such a function arises from the fact that $K$ may sometimes be larger than the memory available. Therefore, it is convenient to compute $K$ only in stripes and discard each stripe after the corresponding part of $K \alpha$ has been computed. The parameter \code{blocksize} determines the number of rows in the stripes. In particular,
\begin{verbatim}
f <- kernelMult(kernel, x, alpha)
\end{verbatim}
computes $f_i = \sum_{j=1}^m k(x_i, x_j) \alpha_j$ and
\begin{verbatim}
f <- kernelMult(kernel, x1, x2, alpha)
\end{verbatim}
computes $f_i = \sum_{j=1}^m k(x1_i, x2_j) \alpha_j$.

\item[\code{kernelPol}] is a method very similar to \code{kernelMatrix}, with the only difference that rather than computing $K_{ij} = k(x_i, x_j)$ it computes $K_{ij} = y_i y_j k(x_i, x_j)$. This means that
\begin{verbatim}
K <- kernelPol(kernel, x, y)
\end{verbatim}
computes the matrix $K_{ij} = y_i y_j k(x_i, x_j)$ where the $x_i$ are the rows of $x$ and the $y_i$ are elements of the vector~$y$. Moreover,
\begin{verbatim}
K <- kernelPol(kernel, x1, x2, y1, y2)
\end{verbatim}
computes the matrix $K_{ij} = y1_i y2_j k(x1_i, x2_j)$. Both \code{x1} and \code{x2} may be matrices and \code{y1} and \code{y2} vectors.
\end{description}

An example using these functions:
<<>>=
## create a polynomial kernel function of degree 2
poly <- polydot(degree = 2)
## create artificial data set
x <- matrix(rnorm(60), 6, 10)
y <- matrix(rnorm(40), 4, 10)
## compute kernel matrices
kx <- kernelMatrix(poly, x)
kxy <- kernelMatrix(poly, x, y)
@

\section{Kernel methods}

Providing a solid base for creating kernel-based methods is part of what we are trying to achieve with this package; the other part is to provide a wider range of kernel-based methods in \proglang{R}. In the rest of the paper we present the kernel-based methods available in \pkg{kernlab}. All the methods in \pkg{kernlab} can be used with any of the kernels included in the package as well as with any valid user-defined kernel. User-defined kernel functions can be passed to existing kernel methods through the \code{kernel} argument.

\subsection{Support vector machine}

Support vector machines \citep{kernlab:Vapnik:1998} have gained prominence in the fields of machine learning and pattern recognition, for both classification and regression. The solutions to classification and regression problems sought by kernel-based algorithms such as the SVM are linear functions in the feature space:
\begin{equation}
f(x) = w^\top \Phi(x)
\end{equation}
for some weight vector $w \in F$. The kernel trick can be exploited in this setting whenever the weight vector~$w$ can be expressed as a linear combination of the training points, $w = \sum_{i=1}^{n} \alpha_i \Phi(x_i)$, implying that $f$ can be written as
\begin{equation}
f(x) = \sum_{i=1}^{n}\alpha_i k(x_i, x).
\end{equation}
A very important issue that arises is that of choosing a kernel~$k$ for a given learning task. Intuitively, we wish to choose a kernel that induces the ``right'' metric in the space. Support Vector Machines choose a function $f$ that is linear in the feature space by optimizing some criterion over the sample.
In the case of the 1-norm Soft Margin classification the optimization problem takes the form:
\begin{eqnarray}
\nonumber
\mathrm{minimize} && t(w,\xi) = \frac{1}{2}{\|w\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\
\mbox{subject to~} && y_i ( \langle x_i , w \rangle +b ) \geq 1- \xi_i \qquad (i=1,\dots,m)\\
\nonumber
&& \xi_i \ge 0 \qquad (i=1,\dots, m)
\end{eqnarray}
Based on similar methodology, SVMs deal with the problem of novelty detection (or one-class classification) and regression.

\pkg{kernlab}'s implementation of support vector machines, \code{ksvm}, is based on the optimizers found in \pkg{bsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/bsvm}} \citep{kernlab:Hsu:2002} and \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, which includes a very efficient version of the Sequential Minimal Optimization (SMO) algorithm. SMO decomposes the SVM Quadratic Problem (QP) without using any numerical QP optimization steps. Instead, it chooses to solve the smallest possible optimization problem involving two elements of $\alpha_i$, because they must obey one linear equality constraint. At every step, SMO chooses two $\alpha_i$ to jointly optimize, finds the optimal values for these $\alpha_i$ analytically, thus avoiding numerical QP optimization, and updates the SVM to reflect the new optimal values.

The SVM implementations available in \code{ksvm} include the C-SVM classification algorithm along with the $\nu$-SVM classification formulation, which is equivalent to the former but has a more natural ($\nu$) model parameter taking values in $[0,1]$, serving as an upper bound on the fraction of training errors and a lower bound on the fraction of support vectors found in the data set.

For classification problems which include more than two classes (multi-class), a one-against-one or pairwise classification method \citep{kernlab:Knerr:1990, kernlab:Kressel:1999} is used. This method constructs ${k \choose 2}$ classifiers where each one is trained on data from two classes. Prediction is done by voting where each classifier gives a prediction and the class which is predicted most often wins (``Max Wins''). This method has been shown to produce robust results when used with SVMs \citep{kernlab:Hsu2:2002}.

Furthermore, the \code{ksvm} implementation provides the ability to produce class probabilities as output instead of class labels. This is done by an improved implementation \citep{kernlab:Lin:2001} of Platt's a posteriori probabilities \citep{kernlab:Platt:2000} where a sigmoid function
\begin{equation}
P(y=1\mid f) = \frac{1}{1+ e^{Af+B}}
\end{equation}
is fitted to the decision values~$f$ of the binary SVM classifiers; $A$ and $B$ are estimated by minimizing the negative log-likelihood function. To extend the class probabilities to the multi-class case, the class probability output of each binary classifier is combined by the \code{couple} method, which implements methods for combining class probabilities proposed in \citep{kernlab:Wu:2003}.

In order to create a similar probability output for regression, following \cite{kernlab:Weng:2004}, we suppose that the SVM is trained on data from the model
\begin{equation}
y_i = f(x_i) + \delta_i
\end{equation}
where $f(x_i)$ is the underlying function and $\delta_i$ is independent and identically distributed random noise. Given test data $x$, the distribution of $y$ given $x$ allows one to draw probabilistic inferences about $y$; e.g., one can construct a predictive interval $\Phi = \Phi(x)$ such that $y \in \Phi$ with a certain probability.
If $\hat{f}$ is the estimated (predicted) function of the SVM on new data, then $\eta = \eta(x) = y - \hat{f}(x)$ is the prediction error and $y \in \Phi$ is equivalent to $\eta \in \Phi$. Empirical observation shows that the distribution of the residuals $\eta$ can be modeled both by a Gaussian and a Laplacian distribution with zero mean. In this implementation the Laplacian with zero mean is used:
\begin{equation}
p(z) = \frac{1}{2\sigma}e^{-\frac{|z|}{\sigma}}
\end{equation}
Assuming that the $\eta$ are independent, the scale parameter $\sigma$ is estimated by maximizing the likelihood. The data for the estimation are produced by a three-fold cross-validation. For the Laplace distribution the maximum likelihood estimate is:
\begin{equation}
\sigma = \frac{\sum_{i=1}^m|\eta_i|}{m}
\end{equation}

Another approach for multi-class classification supported by the \code{ksvm} function is the one proposed in \cite{kernlab:Crammer:2000}. This algorithm works by solving a single optimization problem including the data from all classes:
\begin{eqnarray}
\nonumber
\mathrm{minimize} && t(w_n,\xi) = \frac{1}{2}\sum_{n=1}^k{\|w_n\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\
\mbox{subject to~} && \langle x_i , w_{y_i} \rangle - \langle x_i , w_{n} \rangle \geq b_i^n - \xi_i \qquad (i=1,\dots,m) \\
\mbox{where} && b_i^n = 1 - \delta_{y_i,n}
\end{eqnarray}
where the decision function is
\begin{equation}
\mathrm{argmax}_{n=1,\dots,k} \langle x_i , w_{n} \rangle
\end{equation}
This optimization problem is solved by a decomposition method proposed in \cite{kernlab:Hsu:2002} where optimal working sets are found (that is, sets of $\alpha_i$ values which have a high probability of being non-zero). The QP sub-problems are then solved by a modified version of the \pkg{TRON}\footnote{\url{http://www-unix.mcs.anl.gov/~more/tron/}} \citep{kernlab:more:1999} optimization software.

One-class classification or novelty detection \citep{kernlab:Williamson:1999, kernlab:Tax:1999}, where essentially an SVM detects outliers in a data set, is another algorithm supported by \code{ksvm}. SVM novelty detection works by creating a spherical decision boundary around a set of data points by means of a set of support vectors describing the sphere's boundary. The $\nu$ parameter is used to control the volume of the sphere and consequently the number of outliers found. Again, the value of $\nu$ represents the fraction of outliers found. Furthermore, $\epsilon$-SVM \citep{kernlab:Vapnik2:1995} and $\nu$-SVM \citep{kernlab:Smola1:2000} regression are also available.

The problem of model selection is partially addressed by an empirical observation for the popular Gaussian RBF kernel \citep{kernlab:Caputo:2002}, where the optimal values of the $\sigma$ hyper-parameter are shown to lie between the 0.1 and 0.9 quantile of the $\|x- x'\|$ statistics. The \code{sigest} function uses a sample of the training set to estimate these quantiles and returns a vector containing their values; pretty much any value within this interval leads to good performance. An example for the \code{ksvm} function is shown below.
<<>>=
## simple example using the promotergene data set
data(promotergene)
## create test and training set
tindex <- sample(1:dim(promotergene)[1], 5)
genetrain <- promotergene[-tindex, ]
genetest <- promotergene[tindex, ]
## train a support vector machine
gene <- ksvm(Class ~ ., data = genetrain, kernel = "rbfdot",
             kpar = "automatic", C = 60, cross = 3, prob.model = TRUE)
gene
predict(gene, genetest)
predict(gene, genetest, type = "probabilities")
@

\begin{figure}
\centering
<<fig=TRUE>>=
set.seed(123)
x <- rbind(matrix(rnorm(120), , 2), matrix(rnorm(120, mean = 3), , 2))
y <- matrix(c(rep(1, 60), rep(-1, 60)))
svp <- ksvm(x, y, type = "C-svc")
plot(svp, data = x)
@
\caption{A contour plot of the SVM decision values for a toy binary classification problem using the \code{plot} function.}
\label{fig:ksvm Plot}
\end{figure}

\subsection{Relevance vector machine}

The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM, making predictions based on a function of the form
\begin{equation}
y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0
\end{equation}
where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function. It adopts a Bayesian approach to learning by introducing a prior over the weights $\alpha$,
\begin{equation}
p(\alpha, \beta) = \prod_{i=1}^m N(\alpha_i \mid 0 , \beta_i^{-1}) \mathrm{Gamma}(\beta_i\mid \beta_\beta , \alpha_\beta)
\end{equation}
governed by a set of hyper-parameters $\beta$, one associated with each weight, whose most probable values are iteratively estimated from the data. Sparsity is achieved because in practice the posterior distribution over many of the weights is sharply peaked around zero. Furthermore, unlike the SVM classifier, the non-zero weights in the RVM are not associated with examples close to the decision boundary, but rather appear to represent ``prototypical'' examples. These examples are termed \emph{relevance vectors}.

\pkg{kernlab} currently has an implementation of the RVM based on a type~II maximum likelihood method which can be used for regression. The function returns an \proglang{S4} object containing the model parameters along with indexes for the relevance vectors and the kernel function and hyper-parameters used.

<<>>=
x <- seq(-20, 20, 0.5)
y <- sin(x)/x + rnorm(81, sd = 0.03)
y[41] <- 1
@
<<>>=
rvmm <- rvm(x, y, kernel = "rbfdot", kpar = list(sigma = 0.1))
rvmm
ytest <- predict(rvmm, x)
@

\begin{figure}
\centering
<<fig=TRUE>>=
plot(x, y, cex = 0.5)
lines(x, ytest, col = "red")
points(x[RVindex(rvmm)], y[RVindex(rvmm)], pch = 21)
@
\caption{Relevance vector regression on data points created by the $\mathrm{sinc}(x)$ function; relevance vectors are shown circled.}
\label{fig:RVM sigmoid}
\end{figure}

\subsection{Gaussian processes}

Gaussian processes \citep{kernlab:Williams:1995} are based on the ``prior'' assumption that adjacent observations should convey information about each other. In particular, it is assumed that the observed variables are normal, and that the coupling between them takes place by means of the covariance matrix of a normal distribution. Using the kernel matrix as the covariance matrix is a convenient way of extending Bayesian modeling of linear estimators to nonlinear situations. Furthermore it represents the counterpart of the ``kernel trick'' in methods minimizing the regularized risk.

For regression estimation we assume that rather than observing $t(x_i)$ we observe $y_i = t(x_i) + \xi_i$ where $\xi_i$ is assumed to be independent Gaussian distributed noise with zero mean.
The posterior distribution is given by
\begin{equation}
p(\mathbf{y}\mid \mathbf{t}) = \left[ \prod_ip(y_i - t(x_i)) \right] \frac{1}{\sqrt{(2\pi)^m \det(K)}} \exp \left(-\frac{1}{2}\mathbf{t}^T K^{-1} \mathbf{t} \right)
\end{equation}
and after substituting $\mathbf{t} = K\mathbf{\alpha}$ and taking logarithms
\begin{equation}
\ln{p(\mathbf{\alpha} \mid \mathbf{y})} = - \frac{1}{2\sigma^2}\| \mathbf{y} - K \mathbf{\alpha} \|^2 -\frac{1}{2}\mathbf{\alpha}^T K \mathbf{\alpha} +c
\end{equation}
and maximizing $\ln{p(\mathbf{\alpha} \mid \mathbf{y})}$ with respect to $\mathbf{\alpha}$ to obtain the maximum a posteriori approximation yields
\begin{equation}
\mathbf{\alpha} = (K + \sigma^2\mathbf{1})^{-1} \mathbf{y}
\end{equation}
Knowing $\mathbf{\alpha}$ allows for prediction of $y$ at a new location $x$ through $y = K(x,x_i){\mathbf{\alpha}}$. In similar fashion Gaussian processes can be used for classification. \code{gausspr} is the function in \pkg{kernlab} implementing Gaussian processes for classification and regression.

\subsection{Ranking}

The success of Google has vividly demonstrated the value of a good ranking algorithm in real world problems. \pkg{kernlab} includes a ranking algorithm based on work published in \citep{kernlab:Zhou:2003}. This algorithm exploits the geometric structure of the data, in contrast to the more naive approach which uses the Euclidean distances or inner products of the data. Since real world data are usually highly structured, this algorithm should perform better than a simpler approach based on a Euclidean distance measure.

First, a weighted network is defined on the data and an authoritative score is assigned to every point. The query points act as source nodes that continually pump their scores to the remaining points via the weighted network, and the remaining points further spread the score to their neighbors. The spreading process is repeated until convergence and the points are ranked according to the scores they received.

Suppose we are given a set of data points $X = \{x_1, \dots, x_{s}, x_{s+1}, \dots, x_{m}\}$ in $\mathbf{R}^n$ where the first $s$ points are the query points and the rest are the points to be ranked. The algorithm works by connecting the two nearest points iteratively until a connected graph $G = (X, E)$ is obtained, where $E$ is the set of edges. The affinity matrix $K$ is defined, e.g., by $K_{ij} = \exp(-\sigma\|x_i - x_j \|^2)$ if there is an edge $e(i,j) \in E$, and is $0$ for the remaining and the diagonal elements. The matrix is normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$, and
\begin{equation}
f(t+1) = \alpha Lf(t) + (1 - \alpha)y
\end{equation}
is iterated until convergence, where $\alpha$ is a parameter in $[0,1)$. The points are then ranked according to their final scores $f_{i}(t_f)$.

\pkg{kernlab} includes an \proglang{S4} method implementing the ranking algorithm. The algorithm can be used both with an edge-graph, where the structure of the data is taken into account, and without, which is equivalent to ranking the data by their distance in the projected space.

\begin{figure}
\centering
<<fig=TRUE>>=
data(spirals)
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2, ]
ranked <- ranking(ran, 54, kernel = "rbfdot",
                  kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
c <- 1:86
op <- par(mfrow = c(1, 2), pty = "s")
plot(ran)
plot(ran, cex = c[ranked[, 3]]/40)
@
\caption{The points on the left are ranked according to their similarity to the uppermost left point. Points with a higher rank appear bigger.
Instead of ranking the points on simple Euclidean distance, the structure of the data is recognized and all points on the upper structure are given a higher rank than points in the lower structure, although they are further away in distance.}
\label{fig:Ranking}
\end{figure}

\subsection{Online learning with kernels}

The \code{onlearn} function in \pkg{kernlab} implements the online kernel algorithms for classification, novelty detection, and regression described in \citep{kernlab:Kivinen:2004}. In batch learning, it is typically assumed that all the examples are immediately available and are drawn independently from some distribution $P$. One natural measure of quality for some $f$ in that case is the expected risk
\begin{equation}
R[f,P] := E_{(x,y)\sim P}[l(f(x),y)]
\end{equation}
Since usually $P$ is unknown, a standard approach is to instead minimize the empirical risk
\begin{equation}
R_{\mathrm{emp}}[f] := \frac{1}{m}\sum_{t=1}^m l(f(x_t),y_t)
\end{equation}
Minimizing $R_{\mathrm{emp}}[f]$ may lead to overfitting (complex functions that fit well on the training data but do not generalize to unseen data). One way to avoid this is to penalize complex functions by instead minimizing the regularized risk
\begin{equation}
R_{\mathrm{reg}}[f,S] := R_{\mathrm{reg},\lambda}[f,S] := R_{\mathrm{emp}}[f] + \frac{\lambda}{2}\|f\|_{H}^2
\end{equation}
where $\lambda > 0$ and $\|f\|_{H} = {\langle f,f \rangle}_{H}^{\frac{1}{2}}$ does indeed measure the complexity of $f$ in a sensible way. The constant $\lambda$ needs to be chosen appropriately for each problem.

Since in online learning one is interested in dealing with one example at a time, the definition of an instantaneous regularized risk on a single example is needed
\begin{equation}
R_{\mathrm{inst}}[f,x,y] := R_{\mathrm{inst},\lambda}[f,x,y] := R_{\mathrm{reg},\lambda}[f,((x,y))]
\end{equation}
The implemented algorithms are classical stochastic gradient descent algorithms performing gradient descent on the instantaneous risk. The general form of the update rule is:
\begin{equation}
f_{t+1} = f_t - \eta_t \partial_f R_{\mathrm{inst},\lambda}[f,x_t,y_t]|_{f=f_t}
\end{equation}
where $f_t \in H$, $\partial_f$ is shorthand for $\partial/\partial f$ (the gradient with respect to $f$), and $\eta_t > 0$ is the learning rate. Due to the learning taking place in a \textit{reproducing kernel Hilbert space} $H$, the kernel $k$ used has the property $\langle f,k(x,\cdotp)\rangle_H = f(x)$ and therefore
\begin{equation}
\partial_f l(f(x_t),y_t) = l'(f(x_t),y_t)k(x_t,\cdotp)
\end{equation}
where $l'(z,y) := \partial_z l(z,y)$. Since $\partial_f\|f\|_H^2 = 2f$, the update becomes
\begin{equation}
f_{t+1} := (1 - \eta_t\lambda)f_t -\eta_t l'( f_t(x_t),y_t)k(x_t,\cdotp)
\end{equation}

The \code{onlearn} function implements the online learning algorithm for regression, classification, and novelty detection. The online nature of the algorithm requires a different approach to the use of the function. An object is used to store the state of the algorithm at each iteration $t$; this object is passed to the function as an argument and is returned at each iteration $t+1$ containing the model parameter state at this step. An empty object of class \code{onlearn} is initialized using the \code{inlearn} function.
<<>>=
## create toy data set
x <- rbind(matrix(rnorm(90), , 2), matrix(rnorm(90) + 3, , 2))
y <- matrix(c(rep(1, 45), rep(-1, 45)), , 1)
## initialize onlearn object
on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2),
              type = "classification")
ind <- sample(1:90, 90)
## learn one data point at a time
for (i in ind)
  on <- onlearn(on, x[i, ], y[i], nu = 0.03, lambda = 0.1)
sign(predict(on, x))
@

\subsection{Spectral clustering}

Spectral clustering \citep{kernlab:Ng:2001} is a promising alternative to common clustering algorithms which has emerged recently. In this method one uses the top eigenvectors of a matrix created by some similarity measure to cluster the data. Similarly to the ranking algorithm, an affinity matrix is created from the data as
\begin{equation}
K_{ij}=\exp(-\sigma\|x_i - x_j \|^2)
\end{equation}
and normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$. Then the top $k$ eigenvectors (where $k$ is the number of clusters to be found) of the normalized affinity matrix are used to form an $n \times k$ matrix $Y$ whose rows are normalized again to unit length. Treating each row of this matrix as a data point, \code{kmeans} is finally used to cluster the points.

\pkg{kernlab} includes an \proglang{S4} method called \code{specc} implementing this algorithm which can be used through a formula interface or a matrix interface. The \proglang{S4} object returned by the method extends the class ``vector'' and contains the assigned cluster for each point along with information on the centers, size, and within-cluster sum of squares of each cluster. In case a Gaussian RBF kernel is being used, a model selection process can be used to determine the optimal value of the $\sigma$ hyper-parameter. For a good value of $\sigma$ the values of $Y$ tend to cluster tightly, and it turns out that the within-cluster sum of squares is a good indicator for the ``quality'' of the $\sigma$ value found. We then iterate through the $\sigma$ values to find an optimal value for $\sigma$.

\begin{figure}
\centering
<<fig=TRUE>>=
data(spirals)
sc <- specc(spirals, centers = 2)
plot(spirals, pch = (23 - 2*sc))
@
\caption{Clustering the two spirals data set with \code{specc}.}
\label{fig:Spectral Clustering}
\end{figure}

\subsection{Kernel principal components analysis}

Principal component analysis (PCA) is a powerful technique for extracting structure from possibly high-dimensional data sets. PCA is an orthogonal transformation of the coordinate system in which we describe the data. The new coordinates by which we represent the data are called principal components. Kernel PCA \citep{kernlab:Schoelkopf:1998} performs a nonlinear transformation of the coordinate system by finding principal components which are nonlinearly related to the input variables. Given a set of centered observations $x_k$, $k=1,\dots,M$, $x_k \in \mathbf{R}^N$, PCA diagonalizes the covariance matrix $C = \frac{1}{M}\sum_{j=1}^Mx_jx_{j}^T$ by solving the eigenvalue problem $\lambda\mathbf{v}=C\mathbf{v}$. The same computation can be done in a dot product space $F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$. Assuming that we deal with centered data and use the covariance matrix in $F$,
\begin{equation}
\hat{C}=\frac{1}{M}\sum_{j=1}^M \Phi(x_j)\Phi(x_j)^T
\end{equation}
the kernel principal components are then computed by taking the eigenvectors of the centered kernel matrix $K_{ij} = \langle \Phi(x_i),\Phi(x_j) \rangle$.
\code{kpca}, the function implementing KPCA in \pkg{kernlab}, can be used both with a formula and a matrix interface, and returns an \proglang{S4} object of class \code{kpca} containing the principal components, the corresponding eigenvalues, and the projection of the training data on the new coordinate system. Furthermore, the \code{predict} function can be used to embed new data points into the new coordinate system.

\begin{figure}
\centering
<<fig=TRUE>>=
data(spam)
train <- sample(1:dim(spam)[1], 400)
kpc <- kpca(~., data = spam[train, -58], kernel = "rbfdot",
            kpar = list(sigma = 0.001), features = 2)
kpcv <- pcv(kpc)
plot(rotated(kpc), col = as.integer(spam[train, 58]),
     xlab = "1st Principal Component", ylab = "2nd Principal Component")
@
\caption{Projection of the spam data on two kernel principal components using an RBF kernel.}
\label{fig:KPCA}
\end{figure}

\subsection{Kernel feature analysis}

Whilst KPCA leads to very good results, there are nevertheless some issues to be addressed. First, the standard version of KPCA is computationally expensive, as the algorithm scales as $O(m^3)$; second, the resulting feature extractors are given as a dense expansion in terms of the training patterns. Sparse solutions are often achieved in supervised learning settings by using an $l_1$ penalty on the expansion coefficients. An algorithm can be derived using the same approach in feature extraction, requiring only $n$ basis functions to compute the first $n$ features. Kernel feature analysis \citep{kernlab:Olvi:2000} is computationally simple and scales approximately one order of magnitude better on large data sets than standard KPCA. Choosing $\Omega [f] = \sum_{i=1}^m |\alpha_i |$ this yields
\begin{equation}
F_{LP} = \{ \mathbf{w} \vert \mathbf{w} = \sum_{i=1}^m \alpha_i
\Phi(x_i) \mbox{~with~} \sum_{i=1}^m |\alpha_i | \leq 1 \}
\end{equation}
This setting leads to the first ``principal vector'' in the $l_1$ context
\begin{equation}
\mathbf{\nu}^1 = \mathrm{argmax}_{\mathbf{\nu} \in F_{LP}} \frac{1}{m} \sum_{i=1}^m \langle \mathbf{\nu},\mathbf{\Phi}(x_i) - \frac{1}{m}\sum_{j=1}^m\mathbf{\Phi}(x_j) \rangle^2
\end{equation}
Subsequent ``principal vectors'' can be defined by enforcing optimality with respect to the remaining orthogonal subspaces. Due to the $l_1$ constraint the solution has the favorable property of being sparse in terms of the coefficients $\alpha_i$.

The function \code{kfa} in \pkg{kernlab} implements Kernel Feature Analysis by using a projection pursuit technique on a sample of the data. Results are then returned in an \proglang{S4} object.

\begin{figure}
\centering
<<fig=TRUE>>=
data(promotergene)
f <- kfa(~., data = promotergene, features = 2, kernel = "rbfdot",
         kpar = list(sigma = 0.013))
plot(predict(f, promotergene), col = as.numeric(promotergene[, 1]),
     xlab = "1st Feature", ylab = "2nd Feature")
@
\caption{Projection of the promotergene data set on two features using an RBF kernel.}
\label{fig:KFA}
\end{figure}

\subsection{Kernel canonical correlation analysis}

Canonical correlation analysis (CCA) is concerned with describing the linear relations between variables. If we have two data sets $x_1$ and $x_2$, then classical CCA attempts to find linear combinations of the variables which give the maximum correlation between the combinations. I.e., if
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}\mathbf{x_1} = \sum_j w_{1j} x_{1j} \\
&& y_2 = \mathbf{w_2}\mathbf{x_2} = \sum_j w_{2j} x_{2j}
\end{eqnarray*}
one wishes to find those values of $\mathbf{w_1}$ and $\mathbf{w_2}$ which maximize the correlation between $y_1$ and $y_2$.
Similar to the KPCA algorithm, CCA can be extended and used in a dot product space~$F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$, as
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}\mathbf{\Phi(x_1)} = \sum_j w_{1j} \Phi(x_{1j}) \\
&& y_2 = \mathbf{w_2}\mathbf{\Phi(x_2)} = \sum_j w_{2j} \Phi(x_{2j})
\end{eqnarray*}
Following \cite{kernlab:kuss:2003}, the \pkg{kernlab} implementation of KCCA projects the data vectors on a new coordinate system using KPCA and uses linear CCA to retrieve the correlation coefficients. The \code{kcca} method in \pkg{kernlab} returns an \proglang{S4} object containing the correlation coefficients for each data set and the corresponding correlation along with the kernel used.

\subsection{Interior point code quadratic optimizer}

In many kernel-based algorithms, learning implies the minimization of some risk function. Typically we have to deal with quadratic or general convex problems for support vector machines of the type
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & f(x) \\
\mbox{subject to~} & c_i(x) \leq 0 \mbox{~for all~} i \in [n].
\end{array}
\end{equation}
$f$ and $c_i$ are convex functions and $n \in \mathbf{N}$. \pkg{kernlab} provides the \proglang{S4} method \code{ipop} implementing an optimizer of the interior point family \citep{kernlab:Vanderbei:1999} which solves the quadratic programming problem
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & c^\top x+\frac{1}{2}x^\top H x \\
\mbox{subject to~} & b \leq Ax \leq b + r\\
& l \leq x \leq u \\
\end{array}
\end{equation}
This optimizer can be used in regression, classification, and novelty detection in SVMs.

\subsection{Incomplete Cholesky decomposition}

When dealing with kernel-based algorithms, calculating a full kernel matrix should be avoided since it is already an $O(N^2)$ operation. Fortunately, the fact that kernel matrices are positive semidefinite is a strong constraint and good approximations can be found with small computational cost. The Cholesky decomposition factorizes a positive semidefinite $N \times N$ matrix $K$ as $K=ZZ^T$, where $Z$ is a lower triangular $N \times N$ matrix. Exploiting the fact that kernel matrices are usually of low rank, an \emph{incomplete Cholesky decomposition} \citep{kernlab:Wright:1999} finds a matrix $\tilde{Z}$ of size $N \times M$ where $M\ll N$ such that the norm of $K-\tilde{Z}\tilde{Z}^T$ is smaller than a given tolerance $\theta$. The main difference between the incomplete Cholesky decomposition and the standard Cholesky decomposition is that pivots which are below a certain threshold are simply skipped. If $L$ is the number of skipped pivots, we obtain a $\tilde{Z}$ with only $M = N - L$ columns. The algorithm works by picking the column of $K$ to be added next such that a lower bound on the reduction of the approximation error is maximized. \pkg{kernlab} has an implementation of an incomplete Cholesky factorization called \code{inchol} which computes the decomposed matrix $\tilde{Z}$ from the original data for any given kernel without the need to compute a full kernel matrix beforehand. This has the advantage that no full kernel matrix has to be stored in memory.

\section{Conclusions}

In this paper we described \pkg{kernlab}, a flexible and extensible kernel methods package for \proglang{R} with existing modern kernel algorithms along with tools for constructing new kernel-based algorithms.
It provides a unified framework for using and creating kernel-based algorithms in \proglang{R} while using all of \proglang{R}'s modern facilities, like \proglang{S4} classes and namespaces. Our aim for the future is to extend the package and add more kernel-based methods as well as kernel-related tools. Sources and binaries for the latest version of \pkg{kernlab} are available at CRAN\footnote{\url{http://CRAN.R-project.org}} under the GNU General Public License.

A shorter version of this introduction to the \proglang{R} package \pkg{kernlab} is published as \cite{kernlab:Karatzoglou+Smola+Hornik:2004} in the \emph{Journal of Statistical Software}.

\bibliography{jss}

\end{document}
kernlab/inst/COPYRIGHTS0000644000175100001440000000056312376021447014313 0ustar hornikusersCOPYRIGHT STATUS
----------------

The R code in this package is Copyright (C) 2002 Alexandros Karatzoglou

the C++ code in src/ is Copyright (C) 2002 Alexandros Karatzoglou and Chi-Jen Lin

the fast string kernel code is Copyright (C) Choon Hui Theo, SVN Vishwanathan and Alexandros Karatzoglou

MSufSort Version 2.2 is Copyright (C) 2005 Michael A Maniscalo
kernlab/inst/CITATION0000644000175100001440000000151012643171236014022 0ustar hornikuserscitHeader("To cite kernlab in publications use:")

citEntry(entry="Article",
         title = "kernlab -- An {S4} Package for Kernel Methods in {R}",
         author = c(as.person("Alexandros Karatzoglou"),
                    as.person("Alex Smola"),
                    as.person("Kurt Hornik"),
                    as.person("Achim Zeileis")),
         journal = "Journal of Statistical Software",
         year = "2004",
         volume = "11",
         number = "9",
         pages = "1--20",
         url = "http://www.jstatsoft.org/v11/i09/",

         textVersion = paste("Alexandros Karatzoglou, Alex Smola, Kurt Hornik, Achim Zeileis (2004).",
                             "kernlab - An S4 Package for Kernel Methods in R.",
                             "Journal of Statistical Software 11(9), 1-20.",
                             "URL http://www.jstatsoft.org/v11/i09/")
)