kernlab/inst/CITATION

citHeader("To cite kernlab in publications use:")

citEntry(entry = "Article",
         title   = "kernlab -- An {S4} Package for Kernel Methods in {R}",
         author  = personList(as.person("Alexandros Karatzoglou"),
                              as.person("Alex Smola"),
                              as.person("Kurt Hornik"),
                              as.person("Achim Zeileis")),
         journal = "Journal of Statistical Software",
         year    = "2004",
         volume  = "11",
         number  = "9",
         pages   = "1--20",
         url     = "http://www.jstatsoft.org/v11/i09/",
         textVersion =
             paste("Alexandros Karatzoglou, Alex Smola, Kurt Hornik, Achim Zeileis (2004).",
                   "kernlab - An S4 Package for Kernel Methods in R.",
                   "Journal of Statistical Software 11(9), 1-20.",
                   "URL http://www.jstatsoft.org/v11/i09/"))

kernlab/inst/COPYRIGHTS

COPYRIGHT STATUS
----------------

The R code in this package is Copyright (C) 2002 Alexandros Karatzoglou.

The C++ code in src/ is Copyright (C) 2002 Alexandros Karatzoglou and
Chih-Jen Lin.

The fast string kernel code is Copyright (C) Choon Hui Teo,
S.V.N. Vishwanathan and Alexandros Karatzoglou.

MSufSort Version 2.2 is Copyright (C) 2005 Michael A. Maniscalco.

kernlab/inst/doc/kernlab.R

### R code from vignette source 'kernlab.Rnw'

###################################################
### code chunk number 1: preliminaries
###################################################
library(kernlab)
options(width = 70)


###################################################
### code chunk number 2: rbf1
###################################################
## create an RBF kernel function with sigma hyper-parameter 0.05
rbf <- rbfdot(sigma = 0.05)
rbf

## create two random feature vectors
x <- rnorm(10)
y <- rnorm(10)

## compute the dot product between x and y in feature space
rbf(x, y)
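## Not part of the shipped vignette code: a quick sanity check, assuming
## rbfdot's documented form k(x, y) = exp(-sigma * ||x - y||^2); the kernel
## function returns a 1x1 matrix, hence the as.numeric().
all.equal(as.numeric(rbf(x, y)), exp(-0.05 * sum((x - y)^2)))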
###################################################
### code chunk number 3: kernelMatrix
###################################################
## create a polynomial kernel function of degree 2
poly <- polydot(degree = 2)

## create artificial data set
x <- matrix(rnorm(60), 6, 10)
y <- matrix(rnorm(40), 4, 10)

## compute kernel matrices
kx  <- kernelMatrix(poly, x)
kxy <- kernelMatrix(poly, x, y)


###################################################
### code chunk number 4: ksvm
###################################################
## simple example using the promotergene data set
data(promotergene)

## create test and training set
tindex <- sample(1:dim(promotergene)[1], 5)
genetrain <- promotergene[-tindex, ]
genetest  <- promotergene[tindex, ]

## train a support vector machine
gene <- ksvm(Class ~ ., data = genetrain, kernel = "rbfdot",
             kpar = "automatic", C = 60, cross = 3, prob.model = TRUE)
gene
predict(gene, genetest)
predict(gene, genetest, type = "probabilities")


###################################################
### code chunk number 5: kernlab.Rnw:629-635
###################################################
set.seed(123)
x <- rbind(matrix(rnorm(120), , 2), matrix(rnorm(120, mean = 3), , 2))
y <- matrix(c(rep(1, 60), rep(-1, 60)))

svp <- ksvm(x, y, type = "C-svc")
plot(svp, data = x)


###################################################
### code chunk number 6: rvm
###################################################
x <- seq(-20, 20, 0.5)
y <- sin(x)/x + rnorm(81, sd = 0.03)
y[41] <- 1


###################################################
### code chunk number 7: rvm2
###################################################
rvmm <- rvm(x, y, kernel = "rbfdot", kpar = list(sigma = 0.1))
rvmm
ytest <- predict(rvmm, x)


###################################################
### code chunk number 8: kernlab.Rnw:686-689
###################################################
plot(x, y, cex = 0.5)
lines(x, ytest, col = "red")
points(x[RVindex(rvmm)], y[RVindex(rvmm)], pch = 21)


###################################################
### code chunk number 9: ranking
###################################################
data(spirals)
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2, ]
ranked <- ranking(ran, 54, kernel = "rbfdot",
                  kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
sizes <- 1:86
op <- par(mfrow = c(1, 2), pty = "s")
plot(ran)
plot(ran, cex = sizes[ranked[, 3]]/40)


###################################################
### code chunk number 10: onlearn
###################################################
## create toy data set
x <- rbind(matrix(rnorm(90), , 2), matrix(rnorm(90) + 3, , 2))
y <- matrix(c(rep(1, 45), rep(-1, 45)), , 1)

## initialize onlearn object
on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2),
              type = "classification")
ind <- sample(1:90, 90)
## learn one data point at a time
for (i in ind)
  on <- onlearn(on, x[i, ], y[i], nu = 0.03, lambda = 0.1)
sign(predict(on, x))


###################################################
### code chunk number 11: kernlab.Rnw:894-897
###################################################
data(spirals)
sc <- specc(spirals, centers = 2)
plot(spirals, pch = (23 - 2*sc))


###################################################
### code chunk number 12: kpca
###################################################
data(spam)
train <- sample(1:dim(spam)[1], 400)
kpc <- kpca(~., data = spam[train, -58], kernel = "rbfdot",
            kpar = list(sigma = 0.001), features = 2)
kpcv <- pcv(kpc)
plot(rotated(kpc), col = as.integer(spam[train, 58]),
     xlab = "1st Principal Component", ylab = "2nd Principal Component")


###################################################
### code chunk number 13: kfa
###################################################
data(promotergene)
f <- kfa(~., data = promotergene, features = 2, kernel = "rbfdot",
         kpar = list(sigma = 0.013))
plot(predict(f, promotergene), col = as.numeric(promotergene[, 1]),
     xlab = "1st Feature", ylab = "2nd Feature")
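## Not in the shipped file: a minimal sketch projecting the held-out spam
## rows onto the two kernel principal components extracted in the kpca
## chunk above, via kpca's predict() method; labels are used only for
## colouring the embedded points.
emb <- predict(kpc, spam[-train, -58])
plot(emb, col = as.integer(spam[-train, 58]),
     xlab = "1st Principal Component", ylab = "2nd Principal Component")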
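## Also not in the shipped file: kernlab accepts user-defined kernels --
## any R function of two vectors carrying class "kernel" can be passed
## wherever a built-in kernel is expected. A sketch with arbitrary
## hyper-parameter values:
k <- function(x, y) (sum(x * y) + 1) * exp(-0.001 * sum((x - y)^2))
class(k) <- "kernel"
gene2 <- ksvm(Class ~ ., data = promotergene, kernel = k, C = 5, cross = 3)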
kernlab/inst/doc/kernlab.pdf

[binary PDF of the built vignette -- content omitted]
L $x[aJn0J1/.BD\Xż L()@Jp h:1#0X8SENŨp>jSMgDb*aq8MӊqoBB$Q5CKqy8y#Vcw_촚C-r֋kX7TO:9#M1^-g:f箒FYJG*$pF%m]mFC̾&y솜-O^exCؒ%̆Pm{,LxyiCwbDאԆ 9/{YO^|RU[ТzιS& z6m9М=x+UmۓGz*#KlόЂ[y- 'R!CTR endstream endobj 612 0 obj << /Filter /FlateDecode /Length1 1677 /Length2 9272 /Length3 532 /Length 10252 >> stream xe\]רNNnnQ[xΧ;k4Y%,@2`+' @RYSIKƁJK+ BlR@H) ppAy_s FH:8z:XYC %$A6@0@Ck6 '@. d 1@V6`T-\=rvJ`DeWqߐ2vv*@O+hoc{GW`rw.z Wl%Vv km\dl<@j6sk0lÂ]IYZCVǰ t8~93M6lDt`s 0d@OT`y@Pev6 ڙKgԿ50kvG3lr+;`@mwk;;`tx_A 9X].ֿ#v/ y4 a%,U1z.ど}E,U?w|tݻZ?bM;P>h'>)Æc!, {_~*=g-B{߬G}:Ab=T-qD `H?ܸA' N yV^2y3 ky ǦC"M Ruvk@ovtFkxa],fJu"6UiMqpagT>g8ziLIԬR}Q3ܸ#&ܡl vp-b-_6x"ɹFɨ!mb~һu4N{p0K%ۗD6Q??%0}֛(ȣq{Y,Ҥ?)D!$?kFjN6 8Fre(7hxY6DUg/b x#d_hEDf >mLp0:/Zy򶙮4m,!ZgMTE6K(@= xM0xB_XG'aӿPa?'ewl],W"fV0KÉ)&ݏH89 XrAi kfUn @x7vĀ=L)Y]&ĩY1GU'Iqy_Kp PF4[zH[ICf \KUɺ+Q8 %De#7`R^[+BXh8lVV"n w3Nwno)(OcZ])]ӇPzyX!I6 Q..?4GYyKPAs#X 5 ٞ*2VlܿDPO(4;?Rp]|RC}`kvѩr1'/?|NVl&hBGA1Ly9ڔf9LOx1\s0뮒&&kz$fO4%+P/P&ͧY9;Eۀ*@]pbL|J<,Kpo{r| ԳYay ߏ-pIPc>}}L֛u!2m'mk0Oֆ-j=6.ހ94du&|@]R9,b5~DDN D2,} Y]|;0WiWG0Q'Ane)" EbݖH]-lB. ]-l˯lc=JjJ G:MOFMna{-;q7 J+ rzƛc4vP3[ LGk)/y~uDDM,v)NURx;L5$[.4BIG+QxV]>8Fxx*^F5luQS}Q j<8|CL=P]8;FN^YTX CN1%C4ĕt[L506Vik`yq.d}Mo=j:7;Baţ{ZxDRM0wX௟6x,e:|r.LDuyN֠W~~ +HcCy6Lqr_^lܪ˂\50h $;n<<= S$֥/c`19aܐ7/R *oBu3G-w|X[+SQX[~?X!D^faOԔ L$.8g?$11[˜AzXh) x0FR-8CgNXz|[_.rׄ|Ovb2I&r.Nx~2H@kܞ;,!QKe)3- eH+5*)f6,Rx7eDS1^aH/!ABH#Ԣީ3WI,A!|b4FWғpsL(2\Hk b[ &h_5 &Jǹ$=>PsҍO8ML oBg&{I]Ͷd ߐ.j1;ʳ`+a:tfKjxS3%'JbO%1'0hX'^bӓz1c*43ݹs^|Neص ,:EDIC(j(#Sd3BM3J-q_|k4~8 O|/2+L~^`3Z]:W:s֗8DEn#ŗ;V[ `(&GB XfO uZ_U*-Obۢdvu˯>^VdҕȖ3;2x";k6cXV4I_8]zf7c[K}tx4۷-O+FUL vZM(HCF.81w]fpm+!U$]K=H y¡hE6lee"NeT@w(=xLjMbFꕠAb4IWeL@z$GF90Xʃ&E M7nzΈ*UܺTCΖnK-ғ+SDnvB",y>^zBfdNy(ekCׅMYqvAWs&Z|g7co T +’tj n^ [qZnIRx7mѪEW%J5E)StC{痤4?nN4^cHVBGוmueg ,E/+@_.ʿomuFhlACUIBSႳ$[ ! 5 ]?_1$TM u$3FzY>aQ[I~ل$(cnE)$NN?7as*{lb KG8nk*wy`F0{fVr|+ێؗFȺj>3yl'q/J̘GmЌ VZjIIKtZDۈ;-ӋsW,I"Wh`nB_sy F*rYD??W>>]+ɻ?$dPaC|^H<.ycPS'^"ts&v|+WEy^jG}Mm+tsnho~ϗ_",)`cN])OFWycf9O mxvv.@n'PFM7 msW>;{(M֞dXI`pZHW _r~jC`wNrQ CIN-E. oZ1c'~ă3BܭH)^ճ>SngKv$c=g؇E{UEqlO˖QuvkD)krT@J!H4)4%׸Kž#iDZOc}p\d)X-kaee]vv="&MCL%_z,uֿDc.,!Sj`8'z;za9Ӂ@!ټG'$P~Y(Z6hHuغ=ä$ѶjF-# ,˫w{d㠗]Adܜ)& ]),%(^~qY1ק>RTʞ޾AC%:go2^&;{1\TxDxcB\\^a7bYV^5 uz)E'D;Q3Ru, #~19n prJp$JĴ(Ŝ\Qk Ӷ}?- <ڛdn sOzgNO]Z'k[&pk%ٮt(`KiT'FKQF DpaY(cD2)7!MEgUEYtg{TbU3)6etP[~^ v'r)XB+GPC[ 1#YIU\U(WFv/~"c=~v~Eʳ^?I>483}z_s;_9z>%/2duC]G4Tl6g@P$^gϯ0vFo"KL;s Cvinrhr.W,$#k5E;Y7҆lLunz4Evobky^|<}.(j/LðR3u_=s-s.&4KVbӻLIH܋Jb~m,]B5<|W7,\F ASb/+<+\}?@jۍ,vbrH.˱`|_z=GɓՊ%%8`?(S!Slʈ=~Q"TfoN~\3wvm'v|${53{0\X:Ya iJ AP90w-H3X}} ,dt;.73_{Ҏr.}]LCmWE6K|3bw1#@v.鏛,Qa'#F"%)1?/hJ:z+ʜչ~MaDgTYQjfaYGj=}d<mIWX/'h'^(.$K\l}d̋]ő0_ά_8ߕ *+}wn aPʇ/ݛ0'H)u5[LܨdYO ѣ:kzB._!UDLad|^AՉI{z7Wd ۋWl4|HdL@? 3?399R]cVO{C{+P"hi8/-ĂeB3~,~˅ݚ<(V@?ӧ_󭾸A-*ϣXoCd=65:Wg\}PwEXw%9JCi<-ݎAw2yny#t3QN2$t.Lh؎_z >,inoO˭Q#Bܔ~cr:ynaFp}=W2&Za],0S}WSDR8ԩx +Kr"V $LZE.;Xw.GMaHdtZ91/뼜PHi VOxm-{wm{ӎ)FFI. 
4ZuO{0` >zIR~J(:_Ԙ G3n`WOF%KKIJeeJϗ _2>:70c`P>EYz d˵LQ+h l1T@Ec}xV.|y$'Pޕ@vVjc!3^Ȁ# svGrlFD%xZ{Fla9b_h/8=Zq,l6ޒZP4sPZ2\"Kli )kk%G*-#JEy焙Oeזi)&f_%<_*$t4I gV<*qUi~kt.A-3KgsK NObȨmɆ<#70JŭuS'MWtz_̈́f|ZW}vlmV d6dvSіo66 $ ۍGƙ_QϘ ʵ<z(h vdm;2kFtDXbPLWz})[]dVBps蘇PΌBUd89߸m+Tʼb^A*n^g6Xzw\ނ&5\@R$"xlɩB=PSuX^]oKy7G _KȺg#E.|5T;$NcSL(b05]dZn<|ETӣ= rME(CWX5kWʰ(+_KZ}CzɹN2N[  .-+aZXׯ_TsZ]> ˆ#ۚ(/]u`뗬`afD|BdB2Y:,}dэMՎFhŰ_Ǧ8Z,c:2 gh@FҦd% %*5".G6:re{ѯiLd1ƣ݌'|ee Mzx0@$;Cl4 6\ϵ`jQLA]mf.t]^*=n!iRV2z| =%(#1Gǃ%k X9H_%ǘ"xꞿd'9EEr~;6aa+T+ |bUB1:ak·<ͷ&;.*iÉ @B!A_L~}Blh5|țޑ_TRTvE#_ ]lh*B]4n,4I$L9PqZ(QO ) C2HO> stream x[Yo$~ׯ`ãy% Ď8AaQ4Y3vSU<B3="++V+-1gg"6o}S|ϟ:iV.Wsiq^]|\=pQJ|C$?w{|f9֫QgMI61`Yb9 ui$8I#|"-xko߅v `"h#eR}.cx&'KQ yDG|Ʈo 1HhV,ELw2-لq]%Q4;^jOQcT>4/?Xre{- !f낧!hᯋF`VO3D>@W `?HIABE/Kô 0o2[P2S\pf* E8%D #SxdQ/kT-3u[,܉T}~$> )UaIÌd>(*7q-Ja!eEsx|u3ΔU>΄;p#)ǜ sVF:p?Q_3YըSqG5fIdsQ#Ռe3/0)M2/yٸi d\r EF(!J5esY#!-clsds9̱1">qz6A "4.B6B8B) MEocFϙ"\DC5hF:̆Z)nc͏i"v,LXʎQ@ӹ6kZT,1WvZn,^gQ\Iwd>C*&Iq -!~É?k5ٴLjog![I2IȶȲlU C6"[I xI.֝!aƖ%Ѕ\஥_N]hg|/w_Dq&2'MI6h2Uh\Y按fi WʹCMg,5>샣<䰜}G>8}p}-d.LZrQ5s ŠN%EP3Kɶs>׿Q{ߜ !p4ҚaTǞ/^ZݲG| =-=6X\h3 FG/" ~&ry!n8+&˙ !9PUIq.p0'K4]s2>ԟ>\wG0p"hP#Ո%zxT 3¶x '}z?҇Ci>4ʮ?L-Q| 徛05ہ̊FytLo}/ZQ>)1vȁ!ƳҘ!WbO1P|f&A!.pwMЩ#F:Bg9%dمZr!&.6ױC%.qA7NH㺐w52d-PH?I$T]E˗r+v?=]8B}ɣ=L )<.K8b8 LH|9].)<v}f(QI#@֎aaphziƞ8סi+fWQ*z{B8^I>mfr9f30 UG+nM6IEvI?n@ZwށmCBxGū2=0d%Pj(޾0%! Jh-!QxzQE>D?ٱ9xW +2'.0m>I?ƭl+ u:j¤9jLzc~|C`wO!+Ďy\q23C*qw00n'ff\8^1/n&MIJaxޏ> stream x?OTQtGEÿUUP JcaG$*?) 2њPQS 0ށodeuw򻧸;wܝ9|w fI&dI&*?w;7n^]]) {{{);v,$4-oQD'jojlnǏֹ#G`'N$W\9u># <:66+8ŋ(g֢r5n XF?~|ttt}};;;_~m b(ɷ&8jBիW:499 pSnQϷΜ9 (HTݽ{2:ڜh 2T:ڲA [- e;I(@pr44wywvv.apGnt* FBK߿#5đ΄ eUre"+a2W{\?~HA333ܦl ?~Ia~Qq OE h "&+ӞڏܻwܹsWXG?"05<+2=z4Ϊ z &=jZs-nDP#11hM~yy۷@J~zzzcc[ǼuQkWxȥKP :M%14qsr,m?liotP@2sKQJA"Q$䖕+f_ ) tbmm B()*'O l>aRbcރ6Ո Ytuuې 04&w)0Me2 %$YBH>U)4FF*s^)5ѣG;h'dMt'65Fa_l#i\ eX,p]/I;0#j>l͛\A~kڂuvvNLL-..T c8+-t r{f:c-,,,YJ/*BbC޽{GW"mdVwQwm!}.IF{2iӜC{%%ׯ_4|<&(ܶvCZwR6&Xqd{K~kqV144BSt4==MU6,mP4u2 a/jhOVb6aڔ餁FCʎ"wK|dFFFXiKϪ1+++}zkk ][mS7#(:A҉hkC:4-0yޭ|==$OWVQw,i`:EƏ0rۺ3Ü'.=QCʐ5={,swoWI``VLn'0-fXb`63Fq9:LS&ȅ }gGyJbhڛI! ;4Ir9&Cr6fN3kb jʖ I ve$X0:f;GZnqμѣG$z4(㺼!9ϊ9q14:eO:jvl8^C[P`WGm0vmEV 2jdMjEB?& r- KgϞ=|B_*e )>.рD$"=G!\:ug@2Dc 8e3s M]+$=VjqZT#K7Ud1>CrBvvvX`SSSR8O#JbQ VPjIb -3E7Q[|Drـ}_(3FI&dI&dTp1Wendstream endobj 615 0 obj << /Filter /FlateDecode /Length1 1201 /Length2 7426 /Length3 532 /Length 8199 >> stream xUT۶qw \; . . a}N:>v꡾>_}֊Z]MjB\ظع** [NN4zzi'  1s nF:x8m\L% $@'bb?kX ;@ g&dGX,\@k/K +(@_aKWM x6i {,Vhjg/?lS\^ _of`I\]N% T]࿼-A*ك,$!@/;' gY;Rba2wB,}Q_ pN~s@NvNNwFbA|3''34g)n>>φ9!P)Nh*7bW7 lnGqp8<%wHaq7spH&~H!=L$PMVTCU/C?W?\|~b?:|vd>[{\u!s!?٤\|.G=^6n%,\ o=@;maj!lZ:oٙ<(x-.E}EoکqLHyk *I74gDl/Pph|yH{ֆX%Kzdnodm=ْv+(E9;t{ ̓.^?.v-O-aFfϹ q Wj4V;!bM"@қ Vh Yl_UW${<Hܯ6l[Sjc^.~w/|af]CW:Bf[׮/cnXIL6MhmOuf6, L⛚/6_`iIMĵLE{_0^GZtTxRH`d47$ WU5ei bxY&6WQĮoM/unG9:Uy=!E͇9. j\:k8jsl5hD-J_Wh=x|^j^b;LYS&Zd*Z3G6:Ǥ! DŽ@!!0hMj^{,Y!|Lj6A4R#' ( \ؘl'<_|C;CEzgwhyӛ+c \҇ʊxKZ%9轸(z/,Em#Y~W8QEFJdasΌ>#h tm5\[?Vѿ^i2뻋W 6@S d Cy;/58[AAՎVIgf.h;w#+F~tgbx⚘ڦ6\n'+CYSg_u3E^ئ{VNr|+fmz75Df|V9B6#kS.isgP6Wl]YvZHV |AưmZ^-2~z`뷴 /hS~n<,&(\>q۞^U&pjk8i.%%}dGRG JZp5Zh#Gdիo9ke>xf )*-0) cFMFW y4Y ι VU`W\z8Tu%HtA]߉ Q۪Dm msC)1҃c\L!w/[ּCC>vvlRS*gPDf#b H0zSeʾ |s?vՐ1L/,HAgz_OHHdp?ƞ+QݿJv>TvFк@rq HM{һ H5/Ui nqTjowIӰ;JgK\mg]e,U2Y}'|4ݸe!Kzdti-Dpb-FK5YHfs('Lxi% ֬{Obzt;묆UݦDs0%Z6LS0MR By雖dﮀ5_˕f&?<#]uʳ%$KqD@[pypZ!T*w)$T-ʔ]+j apqz!cb? NԵS߾oEBzG]!ƃT|]Yl уr g>f/%f,eM%K\^H1pWC.A5b>1_TrrV bMnZypkO}kG\ݸ:DwX*UMB\]Ӑ!D. 
3"ނ&<~uz|rmyո&R^: XsY婙+iax-R;13YP{|[ϖACj8{j 9Xx_"3Fg&".dE):_p̕{}P)?WeBhCk(r=OUda_k3dzܳ)S.fzV:qs%g6T?,h29 V$&If'%"{Inŗ|ҟCFQ[[PyÄ}~6X 5,xv'f/l}՚ǩ5*&W3&8o>Jѽ.|͹$fMf'f%S̎"I.+Ej2M2dT@!7V34~=b/OԈXG6?jOƬZcxrjXkYX㬃ESjfT0bZ?Andeϓ qݥ/D[ҀHx +Ŋ99OԪސ6,zh`-^{aff}1NßQb3T`T2zn8 T ]m溺gʊC&3H˚ˋ>CQF[Ŷiӊ*3.QD(]WC!֝mYCw~|ȋ]X_:^cB-KNSoW ɘ\l.(_zǧpm ZU~geO aXm=m>V}]#k2YoHf 64|龘.ߊ&Hq1p vn IBGU.iZ!Ft/}rTXIK)u> -!x>C1?0)]nW c/vƫ_2Qx~nJ4 7=3ώXۄrS=٩{zH̎%S!"J:R IX0;_%(#SكF}^yba"RK"b/,u 6̠2vżOa_^& jvڒ2VS1^Ig~T9?-jr$ѭ_]W86\̎( M P8f]ƺl΋ /k>ao4Y3v᥍-߀G\*iƚQET}|@DRlF]U ëlCݾ~KC]姨9 S53Ҫ $KڄZ BWƊ=)L"Ah4z@ˢOn] c0  '\<$Y0FB\0>4Oku9K=Jgi0Qvy!3=JD(̻8u5gpܹ^*[[_i2wVGnݼMN+5}lp%J٪mv㽏ihDXӬ 8~3q0s!Wk<|{3o %<"#)djDןBYuY:DH&__hWcd0b}y/<Vv^K!76 ̟}SkV6a 1 _kD,[`;u];K= PNMLl M A]JZ?*$_IKXï'U}]gIT,2T#`#֯]UlXUD!S7Ը\E 8 կp#&`v9S7$x"n7Dc7p~Օ<#|)*@W y <Ndm=.BԵ `>R9!.OPyƮ-#yf[ϑ5w(I[6U0±&RF۬w!9uȮ#|,gbn̲3~erI$Pc _ZĒQQXlhKՅ僗._ĬPiAE `ӥD\YuedloRݓїG83a)mS6ZXCuuC!/ i7vQS"'LuwB+F8,~KЊ4M8%_EsqG+q_=3r1$Ϊ~Dҳ Vp b9Ո$z@9"Y0{27tw4mf]{fdT RfI..[Eʣ3]]T|KA2ĭhz(Jޅn,&fbޖ)\GMӝ<#盧sR$5Rս\`aKKeL{;ʄ8]9], I4x' Zi݁Cds=pW"[w=Aޓg r #i~崳x*n{F߁HdAab(s\9rzeB)F WOeM&T"#v=\y{N_Cev녭K^u?&{;膬[wc`S$;6%a_-XT{_O))\j^؊+UH|7j f*Jз32 x-=L"B_^aBo!ǩGlNKNaY=<ܵJk;‡z.dSxOȣΪk1 w6N J; % :c sD\6irpT~SMd|fy!&)3˦%Z]@v|.9(&6ǰ~BwʱUik ki/ͭ\Ѿa϶B:Q׹Efܴ<c,JEga 3Xn`9?ɉ_փ/~c[ 3謳_[~:%8h _2ff=^nBGăʟ+mP] l"h[2RO9!`a4sr͜ ]Gendstream endobj 616 0 obj << /Filter /FlateDecode /Length1 1046 /Length2 2706 /Length3 532 /Length 3375 >> stream xy<{ǭQ!Eddu"M3?fa]T(f)e%Kd-D"*$%[d<ýz~~96Ē.z$"E& SMum`P& Bŵ #uP)+#O<08\EInd >h@2&h H% )&XdxH 0#Bv$Hn۟ ${D;*O4X `A'DHPkq=O<&?8A"yR@2`J‚d⯡gřX'WjHAqM3pz8k`\'4D"hە oic #R|]v'z׆eCvPZa@/?%bHX+d2B M,D\HdD yaw'@7K_H$ߓ~F 2s3M쟉ڡ<< ݛC}(GwIXO W@FcwfsZZ$ W>IS$ƓL;-eJVO G[41oHpפȼ@|fIz-8dua79må6R9N0OyoyDw͇%:?Wg5wMd5lw"{gIXMk*oH ?d(=&/lwY!jț=&PWXkcZC,Y?~Z[kcX#^L ;It ;|CԛAKpȭ>L"gyM) u(WZ O i0widI(,BE{#V"MXN,,+L %^aBZ|{=BH& yK Wʳ f^L}Eж_<4%B͡{sJԡ0_l,]wfGl(]E^n61GOBLi50_m{8v*kȈ-Wl%9t< /DMQ *h~6VLsL8Չ^at5@̦{vb+`྇.b*gFozKhscud[\jV Q\0JxK$G.Ͼ919, o ~l5^czr=g:scp&gq5಍C~4fr'6W, M -q~,eh AS*>M%h>!Kp\Jx93+ b}a5 D#k.~!-C-7NqQy̠jҼCVHӃ8VDacLĞQ`aM]-6ޒ-IGaUi39rqi6~\-ʕs?F1F?U-БM3>ܥ4W1R2NvֱT4_w1V:jDoj*!Q:2xGޑYG!lkڮIf tnH,Qdu"mZWHWG,ʮ-r"hQG.*i"္hKl Wj{:4lz_(rY _2#(xg̍2K2:cSdFE{Y\okOȳz ꝊKeޣ(@q-BR;;[+wz砱.3Sz=$b.$;Wb,YѸ%{!z4[/D/Xq}Lɣk't{ў;Zl9Yi{3Ek=45W! 1 3OKu1oo6<6;˲HQJduMBg?'DY3WN-_Q jMy<$HbmU3"lS$[5(Fgڥ9Wj^(29W?I~4'ͣ Cbm.%nPWoUrjh t/7 .%^a_fGy:yNM#ʯ6Ufc9бALov}M~-2_s Ajϱeo&2yeq.vNXw;p+I\/^e/WV֜S P(>~nI*L r+Irӱ&0A3L 0:봦i&_~nlkԦTLA*T()9h:d0'y;|Տ y 7+C$eįk̻`A1ǵstt_V%82>(㽍^<;yJBY.:ecX\-GâUM (K-e;`9k(9e;d^$pÛxܧCE\=dmr\ՠj4Ns:1}&y=#[ yo﹍Ψ[[Sh { F/os*]*Tf.j㦁|hz2+jUbYl^ڧ.1y])C":bJ}㍉EaӵS']E$/kzU:x;`5E%%MZbn;&OF ēG7,L*M%o('\)sretDh?ʭ^կ{kK]liaSuR!aݑF #)Č ,j(6fv`FlNyN>$њŷ;̥YOޢPcmWGK֩WmB̈́+P&_Njm PYFEeu=pdU;|x3{?7#ƆڗoC#wŚ=VWB2VD钜3'jZ_J?_k> stream xk>Yɤ6u>ha-JNVs _KR+}u Vp8t?:LKM,j>ifOh^"xq䛗ffUL. /K3hBVA~`f;shT6Kb yϿy@ڰ rb @? 
iHy{+\>kx^G:7|>wy +x􍃮H9IPa21[R>- `Ԃ beU(#IC1N%Er B d^t0 FX#,_lDRӔ(K16ߎX Ur'e}7˧`U?~KP Bd*|ЩڪPlCo+Gh`zS>ᕄZ&@>q/KuZb;7)UY["4Fc21A%4+%Gf أwo/Aubh3uA*ө`.>r1D/~/HN Gڃrú \e`V ?}iE8O[{ߦl4Vq!kD-favd =@M`m @)'mM8N^{?d_Fwmd]d/7.o +9&[(ul59;sXF-xv⺸Nۭ%[:?x"kTfoU4!Fmtpoћ蝗'w0yī09%`Cvy`+G߿OAŽ\tw}dkFvJlG-'2R8?꨹(̊1(v>QӀԘy3N~""$dRdXHyI2X6u dž?YxXULmu$1`"E#1&CdddoPC/ɪΜʜ%ӻ$vl%Q?NPWb6.5k3P=A8$)j^(HaUgY |HQ[}ZcDv+PcՇ`-W=9$L[t7~;xA9Ugp_TwY'&ySwd1, 2]\)zN\ᩘ5w~aMh.Cօ+E @}JwZȼuW-J Bus!V[5 V*LڧZqo-/ ; #~颻,RtgMTޅ D v.|P- $µz'SH7ԁгqBC&BpV͂a(}vZ3!"\Fj'DoK yPhqPQ0 (cF "}vW,2t3O㝺*_g~䯫Eٽ=cxɣM+lnPfjnrz5llQ$)3WhZ ̾M[M8Wp_Hh'KBzce`h2S&w~YkdGiJNi늂pST\pK!w#4Rרإ hy}=Tڙk(M-Nv*;H 1g9T ಃ3R>|BwBJI͌ϰA74BFk4BᗩwPsFrFhXMP h#U_vBj/XB5Ws{4/PMԆ(?IUڦ?5Z .=v򶿓_;N7[KYRH/I57Z{I?>^ߗa+/gͩ.HrX$LGQkSopww1p3[ 2nPHW}eyyo''Te#Zz4iV ?'-S|-}QX3M g߇#w!JjߺS6n JKwyr4۟<*xx7]q}ȇ+oj|d 9@^>&QICS t' 7 gQS'Ruy ޠDJ.kA7eD[endstream endobj 618 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /FlateDecode /Height 106 /Width 75 /Length 2163 >> stream x훿NTKUP !J ɪ7Y 1&R j-팯`eCe5/@Wv]g;_13gffq $H AG6 |`Qaρׯ4Mexxx <޸qիqzzϟ?3é> rԡvU6כPP^rEQ}jjJYT*###kkk[[[׮]rt o޼w҂=o߾uZZ~T*0\˗/(2?_ZZr%۷oŋ.v:{=}v\t,CƲ} |2h2߹sV1v7x2p , 6ǘz,Y πy(>!Gx$tEy\]] (?C'OȄP_|ar@A).#Ce>|Gة 8a~xOîr2!]+yG(A>M]s3uE-xT%&G|=Х:s:) fG)f@<߽{q0TbsL[_$d,,666`!rivZ 8P C8+6BO,I#R"*p̉ol-55R3;!n{{{gi`+$-q%m@6CSmؚ B7df:Ru{,yiU `߼yZ(e5Fo;GrttI ܼ~[o,' ^Y!DXa8*Jb 0Ӧ1oݺe{]hR=3͚ϛG5M($^9} FchA(A!^a'o3\7(>yPG0;;S.upԊf6  蛙L6aO[ o<7Nϝ)>111Hl¨b)ɍ."{a1p~}V\=S"DCŲh2+m0ߧ9d$b'_TKevYF}SVS 2Ϊ8_ޠHƔMp.Ni0 }tȨz"֖=zdЇ NNN၄#~$"vT#.Yҍ4$]{!ũ{ͼgZ]?{ ~3}E,a&|ZY_@|~DXTɐLI4-ċOT^|ł!1xWx"n|>EcHJߪb-X!qImJPJ#fThꍿ+Bf]۽I28i:3?::&kсj)]a{)u^ZJ)폣q쪅<^%pxw姺q-n ɬfR"0睊'FUu GpPT @{Z?W?gG&9 )dO?~h8cx.8DUS- C:qi"VWvFGGgffL؂3$4.䕆_oz-B,>̅wQg2c>|>1[wk#Ȓa-(:j{{tNߣᇵA0ɑeoh/䅻 fbânr@InmmynWT̏;x'1FޞN aJj\l`Эa gSZL&ylM^JbP00gLA##)H%P(qC0!`$%]^^R.`zUgr.= \}^8_!{uy A $H_>endstream endobj 619 0 obj << /Filter /FlateDecode /Length 4841 >> stream x\YG~ׯ eX,]k<@a#?oȣ2dm Ab_DF/ VKe++x* ~՛Yaܳ ~]~/?{nXI)`}~]#y"oz'?>wxߏ߿#pZ0*踕&Zx_\5esaઃ'RwRµcFKKv+;`?3]qZ VzDy!Se_C-(Hn|@1}Žz eWpw|cMf7 B~6^KiirwHn&Wdy!K`H"Q+;u-LCCLgvM`xr}ݣ)i@YՂKM=70dO><اt݇$A=BӱƁϠ0NvY7_*j:%0CP+ fBq)*pjC;ʻaH"H|Q f'gZeB@Y!/b u)C]ǜQFWXuL(ZY0w8kuXgwx>މr`m4Io-kZxcha1/o6´6wF,_;MZ7V42_T&r6|P% Њ~ϸJCM5gP%@n+m* >Wa2>%Y C+0<;ʩfvL?VxYٌ7_q }cHtds-UI5 blڠO˄{,:gNG7< @ @=%}$7+딑A)@qvM N֍7hCxi66iZG|:_/_ ۬Fn5NNў'F{?\3|u°&_1hXǝ6'\e3:aaxXcB@͐by-a#p7i+^ 2/#rgKu .'mx+#<y# {mօYeO߯(]1̀v㘕2",PyI2"-9Pxs}0Dc^"k2 +SCLd/JM[5:w㐣b:U&R_ _$#_v&̣$vX3 [%Vˢ-&)$`oC )T\j^B*@A{%0!d0PMV%̄;˲Ko'䨞-4TRĊkmݖ,s6$ Kjc'E%-N Ji/gb8{W@9fK5WiNhh {h iK&/y.ݶׂwAh.4/&8hivffhe|WDWM+s}DpiD SMSh}lJ"V&P Dv\ކh):]-Pw%&\NZ LaJ#VxܸEAtBBVFuPpŁscjoMm`)2n jλL TCGcs_ѭNyfH,4rV 1g)14 4̗%9.˥L3T_11O )%qJC89*  %O9""H1u B}袰pSD1;RDg<'7h! g%^CQ%DNI285s^ǘbS->Fl4T19 ,XH 1xU lM' L 3_.2l`%4^-s&ė/uюF/_Pp$S9"`.E34Q#8ix8W. !ksUAvFA[z. 
a%g& I1ܥwQ>Rq)6N;tXN; >^OPڡ&xb6#01]_"b?ՐڬI#%2uqUeID-*m' cA IЂ{[?D":L4 ӆ= Δ/6aH,Z۫gKph| XpzЎ{ƞ2߽7En2*&)g uU@WN1WRI1_RR 0=[;@06W!M.@jcr=^XA# h3؊;@Ĭ)ZbsӮaz8nO YC+sqk#+'\G AXuj3 fV(n32l43j;RC!;?G׵)a&gp'4wr 'wۆ2P|y2NjNi%8-Sq8٨ut w2 Ӗ{̩rS xL5X?Y iW6:)B.~$c}:]nf3qTc} +d &E嘆XXiCtE~W}juE$ʮ^\ۗ(Ensf)A!+iji/mSgr)}F92ՙagtSH 1(adѱ']4pn5& &i7R^26E^FO-r` ?u$d.p$bNnI>]:D5,&ӇM ma&a\bro3N͍ސR&ԫVyQ7bK5!/* ݅wϹ!S1i&DŽh1i@5c&g!m KuX>ڮܨ6]n+kٔRGR8#"9Tt @n@3ɟq>~c(Ko֡yR9*x }pxHPʼ><^'ee)5rJ{,H6SèWVMÆ債L'/XylA,8:!R5P8.o_<T _w;kSz+jP_" lV>_@m'iTՑl9D7ٍEƇY8ә Sh0U -}iVCZNqT!1J16A*zv0*%wf_PPy<NXg%,+Yi8?%ܜ&ٷc`M-c|9@TT(U \x24W=/Wbb)gjܥJ{LufYgVc!g%kۜ`uZ$ 5"qǏe&.1G̰ΊBeT9F"@ۤ瘺C!~pnm8,ֲv}T!U] AMzЇ}:"dC؈* WSAOORX"x_(]/e!Ŀ?c;즟CDkK^uCh?'gM]=mZrlrU7$=Ԟ-> stream xMh$|5 L&`\",b &Upnn* .܈ڢ)(-pH!-, -Xo/5{3ޤ^{u677Xs;gWWpGGÇՑ9tPy!PJ|rrNlqhqULy?Yzq8J d8w_&sM[ss38n###"~JfġղI 5*>NéT]]]^^^VV矙loݺu#GP{ԤmMM rĉunnn```vv< pzzѣGԡގ?N''s9M|A¡!^й 8&@O>0ʁ 2"(f8Xdwޥ< tO=8"Kg2_x544PNe,%&s=*$Cyg=?j)Tr%qrħ_ŧ$` ?vଭ[Bh#D$(1s&J)@ qnVudΧutt޾}[$SWO - ;490=0ztgrd6?􁼨L(&{zz܀'defme0h]ZZ @IE 1 uQ>\zU ʛg-jI(eKR%(V%y8.swc'yfCe|POȆ#ڠraaA+5?C y*h l0 R³:H;GTbnC.<y@ޖ5kg/Z~*RramKj]@d\yy^eH6+jqD|ܧy6D(Qȋ R% 2,Z둕D/J@2pA㜪<`QtvvR= iYUL[2;gdGKlqlz+G {L6ًNjWMLZtx666P'&&T`n ưEKPUV?w]4oRKO/bUHF5@] j:K {ˬG:|&rl!ΚH4&!2.wZE (à}֧$ A5<R` BAH2lM%hK—5{A ) E(ϋ麣jgC8֢P-a+OgΜ=EE(8ڻIhǏÑ}NmѷO:UUU'Oij 1&f9JM0d|pq(<T\- B~rr@&9GT`3Rhd`S442M?n;IH{( ȡq{@4zʥ?ɗ"6BHQ׽{0k׮!Doᠨ+]z٧q$۶Ua@ΝW$Xm`)ԜBيѫ 2]V OWKV%H濮+ek If$o?~|_ݾϲ_Utt<u骲YO#G~37]Kۺ7\?~W^I'+ 0(9O>60@W VO&_B0MMLLPKŋޙ@(]7!<ài$=jj2q8S !%7n܀LҰm*Nphmޏ{h;4h=z-L{{+GCRc@5=G}&,08U=ZAљBWmB]gRByh8x Kٕ1Ig1RFa,ܲ;X rTF~pp=20$L_iyOt+JS:ILT28%J8P 3?!sft-45flbKiוQ_z RJnhgP*mFflE ܶau`,*b~5Ï\a<ct44UP.~"VX Cs1!J'5 `J(%^ބZHiLrNM r6^Zdx&&22m5RCpUރ3aZHs6G iZK0-kjA;qb*29>i(B F>E TPID!-jZ!ʕ؃KAyTP`$> stream x6ТQ5)9JʭD(GBj2H;_aC2J Gj6.9w㦻2U!U):F t!hg(CqɮM3I}lqXB`G T'Vs#,tڋ1a2Zo41"NsJkISۋnah6Ot\|sx!h^|$Ceܱm騈 /:S[Is, sFX/ډm8 fqL( F_8ePC E5tl˄_ώcصwl{ ĉ6ٸBs_P(CE[6*j]i;!7|{{s%c25ذ3Dk&f= !.UO[1s40ܚ&JM,{?%)vؕ /!ك~Hon"(\킕/Nmł>oQC!⅛UA(߾CbdA } KY:OY)b=Z|ڝhQ0CLm҆mzY,W_nu\v]G~x%a# 'ȜW~&Cٖo9Z(P9ԉg`O݈s /ͼpJ VTa>dTF:rJI<ӢRGs$V'Ths x6C٨U jHjAthTJA2!u*#`jE m  2LOqElnw/ ˬȩȥ߇N˲>#(fjx{>ӊbmG0?CQUnQD[nΉ.{M^8sQ ]S8A-Ӄ޿TSϘf)긙d Lh2mLQ=!wDOL}TЦT9|IVȟJb"uwPw9s8)oޔOV#ҁlFjB-X랱I6?C1CL1JG:1Lhc;YN]Ly_ҭdԼKJLydKzlKXh@hܔv8 ,.8XڈX#ՂXh7RG}IC? 
saϿ[& oN$eVA!k!% 5͒TVǹO8m|J"P`]3ϸ{Mf]7iܒ ՑB7& =Í"rĝ]g&/)\mR$Mq_;-e9"pY9cox&KilJD=hg[Jh<Q`&-qb7W>3ri >jݘN'>jaōǕbKmOx)^NEj]MwgK_LN%4ta:{C( TȐ` )SxkfY.Ÿ0}[.';-$-M`7}8+@p)g KR>f6[!㊒iɜ sSx.Ɗ~AWYwr*':Rx=$hH|]8'6[f>Dig0, ӳ;m̀m*KH'# Mm!Z4zaRU( BW)[wP-WJ0aCkqmwQQw㦪ϋSi 5ZSxS}DmpgCnI"~{.c-8yyUƲPc S Λ~H2;_NLS ܅]%(:{:1TVac儂4^'0ErMLsCLiK:r2ΐmɖ014crҘN3d0DPi4qB98@w~H5rIY& G%+ɢM$J0&j G-(9tڕI|Wqz9*,h&]YP $`b =i0|#tơD纤[p?Mбb6"}C_ K*/j.ԯ,uFb'oh\6lY| |q^QtTv>e!w!7pMtW4F(7N,t袗F.\3j#ѽ 39EU-H*U37"q+G?MG>^9 ϱԏ_S"ׁ 9Nh}%[a7͟|nT[9_bIS*+垱?zR0-e[]yƵĎRE07a&d*6B%ɓ=Y59ʀaY[ā\k8Ykg81ĮcNb aV= -_HcFVsp`(u1%J/{kS@pgaFHS U*=dZfWP3teቖʑLjsdEdH,Wz2O]90V^Ԭ=B* "!ݓSp{8g6C٨5A|Ɩ3:~ ᙊ.,ф<FFɀ8ES=ESQЫ#0\E4e{mՇQR-8y| f۱X' !_Knl;j`[uZ\8!JEmq2W4gR* ٍHCsԜpw=lƻzG}:'Qm0EOҺVGYjOp>$vŢM.2F RNhɄp(06[bV+p%|r0*yGBjj(`@hr xؾZZȚY!.ո2d %.u 0*LƋssÞ;'mw2†wꭖ-caACx 1p,*\>'PTt1$1R9) Kp$X^;"3aV|lGPh|Ů+骝9L6{q?; sYs.e3]MwKYKuW K10吁>4+|S-Mo,,ufW9Imu0BιӔE]5n=b>x i"^qhCe (%Qؾzɷ=^DK Ģuf%]clqTɸnq+0xřU/a^6~xlv"OC>k{W ` JDtXH ˝N+F0uL+$ ibUvM۔ 5ຂCgkj=X05-eϚfuWai'cqjYi9g78#j#x+|R9׈|jX3ʕd ҹ`a{ZBbV9Q9O/ߦ\G /wa&03D1cNMm(rSn":,g+q# :b- |`UY*OFZ?TǺNfTn'Z۟z%6;`%106dU`֩W`֚FmFiīOf`2$8{r5[&#)-j} 1 O+0(FJ2_B\{Z8pCZA6f-0M^8mꑌӹ"< 硜ח/+$~NzAU,ibps%/mn30v:]/X $28 [-^y)܄\f GjBA!zF:"|VpeѶS4hj}yc>g%>Ku&U7Fa)Gʜ1,!n %'Nڌei(0$M W1]ٚJo .7M" >V" 561i6@3ECX_|4\ o@W'Gn߿p٦Le6F+!B1 Y,endstream endobj 622 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /FlateDecode /Height 106 /Width 75 /Length 3385 >> stream xMhTY+m/*$&R5DHF:qMaj ){@"IdNЭNK5|4C/3-=4 z|tFzߖ10zߣ!qy//۶m=DoScǎ[Ҋ2i&e˖{mcs Ҝk'"(T{$8k t#O&w,Dᆆ"9Og c(Gܹ3Je2d2Iʧyu%.') :@&H$K> wt<$HιX۳qfoSÕLE G!(7[>]Mx2 ϳAK ccUWkbl6 AiXAÅU9@\parre_`hNg/-MXcqٸ:EzPˊhl PxQuSn `#0dyI:㲌KKK}ʫyDL V+惞i?e4yՋuͣ™3gd դ0oAYC7~JXQύKҍa$!2?|+W(A"={V %ꠜ+Sx(-SMrwO;kDD=#z4@kxxK8{ե!'oV0๹9 vil;j +MX %b;XjQ'B`<|0CrGP  !*,pvvV(+r`Q-U+!Nor[Z5?>i(KKcz .ȒqB)""ah+ՠa!TTd; *FGiӉL,`P6L߻ `D555!P~a8JؽB9m7 *\VӶX6D~)ԓ 01h/텱@pe0M@ŝvtS/̢-Y9ŋ{exdrѷNEeU  /Q4s12{~Q%bVR=>5("սQv>A:ӳK(m<'ZPn۴Q + dHl=xի~ޕ.Ae$Wh%̳P0hEVז&jp-2l%[c(R|31}QMX.)O9ZtkxxX;)'WKRj Cx̎o'K*w)Ōo7'dd&Pdq0^ STҋ)B°acw2" *dx7ѣP9Nb+sEP<99S:gU9+Te/lUF6}짻tkSR-yFfmw-vDMjޒH<~+ぁssiK(6[/Z|Ol L[ɭ3 $Ɨ2*[+?_#) H_fbTA16TN%(ʪ)SS)see7ݱUz ƍy#(k~NLLܶFe؎)P2G)ŸRPq5¾d*^`DuŦL ̃71yeku%]^~tH6ȑ#2͛7K!JvޭGv氩\``_N_!y-q&J xMVIGZ lI>7ГtLhff&X#D }N[w=pW s{ftUS}mm-jR}@U&/qlޒA)J]Th<he+Nxc<#8)d۫Lw%:$,!Kֆu''NX^^ξU~WI[vq][^<ឿlG/FG>^X,uO##z?]WwkӖIt'^g6YstDKK0#kkqrl<~'OT*UJUR)7endstream endobj 623 0 obj << /Filter /FlateDecode /Length1 744 /Length2 582 /Length3 532 /Length 1118 >> stream xSU uLOJu+53Rp 4U03RUu.JM,sI,IR04Tp,MW04U002224RUp/,L(Qp)2WpM-LNSM,HZRQZZTeh\ǥrg^Z9D8&UZT tБ @'T*qJB7ܭ4'/1d<(0s3s* s JKR|SRЕB曚Y.Y옗khg`l ,vˬHM ,IPHK)N楠;z`;zD:iCb,WRY`P "0*ʬP6300*B+.׼̼t#S3ĢJ.QF Ն y) @(CV!- f IE Q.L_89WT*Z(X([֢K.-*J+'`PiKMHMy-?ٺ%ku/byb˛"vL 6^G[g_J*\Eׯ'"5[,`_Fxes4<͘HjY:7(ן)jq iMR2.xWH+r6ϋkF|ߩp0S1--:[?k[aL* )ns8hYՓ\{Tc c>=|)$yfUJ)-/4]/vgNk=,-W;߿MܲɇW8mq]1+_{yu@dYO̷u"-L-jcg$)w,ٱVu&D<7n鋏}}ՕM-/&Vu'0x–Ϸ߸ޥ7ۅ[|o,nfr(9uWnv!vܓSgy|Ewl+t__|߳4%L9A}Қ{uTp^2O/q62`=F:n{Jb1p0, HIM,*M,~tendstream endobj 624 0 obj << /Filter /FlateDecode /Length 149 >> stream x313P0P0Bc3csCB.c46K$r9yr+p{E=}JJS ]  b<]00 @0?`d=0s@f d'n.WO@.ʛuiendstream endobj 625 0 obj << /Filter /FlateDecode /Length1 821 /Length2 2211 /Length3 532 /Length 2807 >> stream xRi8FH#YC,k2 m1ØoBB&c*KaBٵtxQRgoxΧsܿ}ݿGugN>`lQCTU-i NRpt@ "Gp#$%ԠpO4,57Hy H#q\ < P$ɀƍ i A@$ aɖG ?[ -k ش pMr8@ 0G*5aWqk ܐom\ /50AiJi_o@bڵ$9H}:?kRHxG7 n|>`nw7aq$ 5<?ɛs#€#p8%r'_BSTB(=G!pN!`0LBs`"?* 1U> !q͊s /'Dp`o¿aaA ;Ct(x  77?; *8. lYS.8/)^R2&o]L1riy囨&eT]Qs/GZ"U#\jO&nKg? 
N"ZENXg#уXs*;yʙ*1?\5^|-dr_7uN:w_^d?I=Uة2fECveԄŌS3޻=;S##xP2{UBӼybU.$zU-ckxϝvSL|``_PsbqSgGyס6:6^넝.t9yb'9Sc |[5hsr>B ;! m2gקǀ%;i33ߧd/j 3ϰ1G/*Ǖq|SC[LVʓ,8&"cȸAu5~oC*goF ѫi͠r42hB`fElJXC{$8٨ǮKEjgNGqڢ,j*=~^SӼC`=b'ھ;eyI=OYRZڤ3,}fCIȱzV^,f6JFRG{q5NbzDBEןpzR tIh*/[Z$,X.cH܎0-1}ؕ0ҝӨITaʶW5K>g#asUibx͓Jԯj;\;QlЛZ2I|%:b6MT?}Sƴe&;W“cV2bGd0,`V?ju{vùآε Pi<62+ssl ;uԧ)I(]vϠn'w1 k7XDsBnOZ``Rs [No;vz̸uhѽ۸s?zq/Q"^~u$sdɶK^Go{|)lvz] ?*egg b!!V{K̜Sy|=vNGɦ?R ٸwQE\.g\6wi=-I^hʸy: g#n0wL\Ojӱl+?lb"OْFd2;ItyHذF^^!gᏼ.33sfu%'UWͰZULʠKn=oK`Xr}=}̢ĹD\ԃORffnvJ%]{&&V|>QKPP1?!s^Py0YL *}49 90t^&=r6JPu96/Qu]qЂD+WO-(2ڃȐ?2uOԱWncor<.0ԪU甾G?K MxoW@X(C*Ś$<ąR-"(e?;aA zlj+3pG<0ALDnL苮XItΰGwlXϝ1/o>TqEq ͸l'k5L8_=l[>,m4RuRek=zY#3LCM??  8up_tK}AD[}f]ӛW:8S>S+ko/<  <@YIendstream endobj 626 0 obj << /Filter /FlateDecode /Length 2489 >> stream xY_sܶiC AOә;6iUԉ']|Sijl!.͉$X.H5O9M-]Vݸ7/M/_k[TE5XT#-2񹸾9U޹~t&`ݻwE\w4 ?Ͻn> BZWN1^D]҇mc-2@Q-oEay/}7\5́f~.|ՅT燐_Fv=l{?CGX-!~ VppiKZ%tQsA),XJ)j+-9gYSOZU .h(e\\Jfn\uj(k?]4::q(j$F&(`J<2lCdО ͯȯwQ /IpSgWof}جd0DsR#>o .Fc,ay,ANm)TUup 8FU1#|f2͚#5Do-PwQЕZ Y^Οc*U]qn*'fn2؊Kl&Aw0c\d+] g!}lYU5@:VޣFg %406$0SJ].0TY gk,aakZK&jق, uWF0 !J~@ZZ?-h۰$m.- hD>ڥ5Yh ZhHh/P WH9qxHud h/ī}@H4*&h==W޾`b.j`W y]\ZiP|#C}0q*m M*@^ԩ C4wY'.Z$ #{SeZ YU2bStOqEZ߭lJ3(o_̠.h%[Qnd3 ,)*LXڠQ.Q]7 lY iHcN;=(LSي!Pq& [dr!2#@g4 QOrl;as7AY[ g[;hW> J -o>EXtdMw|Wm O&BN钤!;OK 9fn݃0`uױaK_b p㿒=f#&A\y/Ӭ+M>:J ݤs/6B #jhjm}QZTYf*\6D9@;ʎ~fٻι2gʊbʋ$Wк V.F]xN.>8TR\:j,RǓ_<02wXk>e~Ȋq>bFgjNp"j1Q=j'CPSK2 $ćc0~ 4_?=#P&li͎c @̇,)or]zcE^ߥ^;'Zen\v@oxLh)*OG|0In8::i>De$qDk8\(gP[ap%:lӅ)m"'@x`pGxvٳ%V۞dؒ\bP`Mdj~I\2ʌEJ?RwKf5Lh wca}*4{E!C_?s} a5 2+HY {a¶;Ye&_-pɫM4def#by \  Y \}f r> stream xK[Y&chm6tFAq#TP\]څ;w.\ GaBe.$v,<{r}8|9w,Ydɒ%KnJWWW7,Fn|>_("Iӝ[[[p|"D"dpCEIT =jUCGn$Ua*].kll\ZZz afff||<z ͑T9==9D immm&[]]UYUUEf^eYSH& wccEG׬q\iF2Bhˌ<jIj*ɰgfAҮ.ZS 0#&l0Ouy\^^zppr TaCE&uJEqu y \k N%:bIg`jkQy󝕨AA g&󎎋訹R_''W55<~K:6~]lmG.W uOO0]fi"hg>`Q h,̺Ag[ ;YB 2w0tww~0 i'sWVVFGGK!jLShp4ݻFX3+RZ.Kc&|z/SJ*Ce+Q:y:ÝT^gے9?U*=HYL˟Y>߷Z>ig) 2E2>D2\__GO>Er2e% H }EXLVNN1LOO~ d[T@+9MA2eͦh7GEL]/j MZhAWa8W -I^^#(4b1PS`I3 0L.ŋTD#iE u~ ;EhGfխS6Y؟:-1$œ BDSA6ӽX0`ƵydNhIK ~}˄t_"7/nnx0]NQX%bIK^]4Imw![++ e*f e1,:<<$ŘofҠ5;x׼|!L$AzrrY\9n]3L2??yZZP %# ]oPB̭kP)@ \lH(Z$dݹD:!z{{IAȴjB0kM%: 5Rrj&ώF*0sN$|cCb@kRU1؆! Q?o9$u'vddLBp"ՁN ,,әxԎLu)h4v+ցZqvB,U`$k0{Jd)m[16%:27zB566z(ZQ'\Z}X2V}dvvayRE~<w%i UX sGnH$ωM dYn %N3endstream endobj 628 0 obj << /BBox [ 0.00000000 0.00000000 720.00000000 720.00000000 ] /FormType 1 /Matrix [ 1.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 ] /PTEX.FileName (./kernlab-005.pdf) /PTEX.InfoDict 512 0 R /PTEX.PageNumber 1 /Resources << /ExtGState << >> /Font << /F1 513 0 R /F2 514 0 R /F3 515 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 62383 /Filter /FlateDecode >> stream xO@$tOnHH`G‸ Z{㲫\eyf~1YYY?;l߿?۟;yvzq˟?]???x?;gWTk&vm.& L6wX./ڔiP,mT6Z~=/l￟Uڜ~r]t~%H{.#T6WIzDy}˼6ߩlk`6 mI:h|>f=my'yyt6ge9X?h3g2c$amu?ɼ[ɲ|6ɥY[Y_IƟ6~*yCe,a~y?5_X]".Y_RgoE=\/!{ E~QE~QSpO?L엸wǿ'~2}@ϟڟ߽.秖}i_Y}q鼖[|.˶vqWjyɷ8ƫܔu q{ʊ9e#w{G\l{]K:Za-}ODU,@|ާ_IAwĎx`"ÿW<-o-w㡃ﻟ͎БX 0"0!_(f27P;_Q_ s?3/s n(n2h:vKS#o(r2h:vKS'. yƑw.SV;o4Zo0LZeysOoq7bA!~,1~s{,B:s? e*_~` E\@'w#?i@l=MMՀ!.|5G;6 ayӫlČ"S Lsj~hL[Ƚxu Ba qo_E7+S<؟闩]^SQxruq)!6ƛ4ʸ*Pv'OOf 2~L%w'rw_~;%t̥SB~NxUn~ƑS6q) SV` {'WwKIRkD~a*Pr{_x~iA˴|po!Ӿ8,w9?<8A˴|`$@ٗ"0{d:hr{j:?,-f-Bsrnewߧȫ 23IQfܧބ>ߏl,,РM"Q<{'!ɹiq0lAZNoybʨ1,xoO^M?! -SqU|xQU( E8/Lh0CR}sx@Nso*r W߬ݼ_,xqUO `\ߢ>9Sc@''79jk& -Ey:vKS6[d+JFCArS;o^MU}OMU`?w.MU`?Zu'>Բ?}RR472<C<g-s  4)˔ /jL[L&=ma!*7+o%x+{)^+c$1'BvSs}sQRjB"7ۈANe+JꘁDRu[L_ e\\6%(>rT_Y&IA.]g"go, 9{O h! 
N rQ(ms\P goi;d]vnws}8-T*ɝzd,tSSށ2`oA4SQA tr 1EЍY"\&=  rC.dVRy@#; tlYhj* &Rv/e "yWz)ޣTuQ7xs2Szh $7`_x.&@NjHFSWn~eA7nt9U ,7hʬAmt96U h . [M6j.ņ@}J>9FʧH!->%ׂ#cSKҗP G7}C.}96 ݳjox@oO>$<97' 7jwOckޟl7}v3r?{Z(ʵ @T8ߏG $Z,O$FV^GY*ncݧޤ]{QZG" Iyr*w*M W^e1XM0Խ?!(-qJ@Q-m(O>! 'm `y=Og%b?]:oO^ Js?J fT\egalFy9Y@'w#"r< mUe5"79{_CN3dart{csg<8owMgPɊ^Hn2rvyPC\-߿aQdEupV.ֹ}]@qm%QH@Rܹt'>+IMQ-k45W.) u )$:ДY~!x9|ȃJ w<*!v:ДYJ"*TB 4eVH) Iaڮtn;_@e ㉜2B.M~lo&3>;Dk㭨 etݧކBls5ok? jm) yc4]0 trrzӏv2Ij0ܽ|YhmN̟1~5ekoO^ϊs~̂~uJZ!! @ڎE@b p'A@VRUțɴd_Қu1XMj@9wGi,l&!sp@<<֜g0~m~ețɴQZ0 \w١D y,AIiqbεO<'00 9Ͻ1PS\[y- 乆dsP(}8B++>&#@Mq'%7k+ɝȷK@ 6σ4Jj2"79{K!"*K2@6۸<=6Y%.S*<fx/!ET^@CmVdK) %[MU h,hʬ^{u-N j,hʬA{eɖ@Sf+K2@^Y%ДYʒ-*%j’ޑ8L~MfRౚ,OOyFTؖv(k eM6*u_ kK3IbM|:DI/f_Q誃+I B-u:*K8j c}'GN"[t~>h2M'!%ckH.@v9W5{˱g\۝X#mH/ܯL5$(:Ф]CH4iW5]6 :˅G'Jft6W@"zHDSXC~e!AdM@v &kH8Y%Ф]C(&4h- OŒ~+GӏKW$ 4>'%O40F֐N>]t W*}Ԁ1x5Q.RZg>O(cW i aDy2y~"b|`&LEV{Siu:Y>1 @)>R(]bO!14-vhIr8)Ľ:d4h̫aY̎swr.<7FLW>[* \\orl1%aͫ$DbnhT|^?(@N3h@*;/'e1Fy!v3g5!G0J fdRe<כ)sS-:b~˓G"\vHN$Qvy#L&G0J fTd q5dL4{w!5r&B$J,1%ǁY_ORlhC:=G;TS&.K)*$&#RbNckHnY)&Ф]C]4Ubˤż5=!Ae7 6)G /kkHY5&Ф]CtcMU$JhҮ!Af@v 20kHY?&Ф]C 2&5j4duBAWDA=Rh MJİ tȥ%) ZGRkjI1[^#.1Az´9pAG_NQ=S J%I sRp\D|aB@Oo`E%5B&\Z"b(|S#q a2_ӃtV8b8a0!t5 Stì9#g?wW1?6Jwq֬0^tڷе'S2&M]DIi~?ɼ6Jxq'(K-b-὿IcbF c"QcQӖBusLc Lp{v+Z6!QcPZDgEnȂ#-+C'92jDvȢ#-+C5mYȨiˊDGFM[V$j,>2jڲ"]}lVOGTi) ?~1\7=rO@d9#N`V_tȑ1=<&Gt{"|0c| Z;Y)F.oK5V a1"`Z|<t|`]_ &c )#8p.RF _7byh?A ru X.a0!!#//>fǁ6}!1LN"v9_׏\3bڮm͆K kriI9H4O5c~Mv}201.wRX̒ `vO1J9$Ert42 +e;K kriI9|(^:kz5nsİAy.怃hky:-Sgkz5uvwʃH)L{0EU@ȑ܂f%ȹ u9Hb(!VPZjxMu|vRr$7bQƤ1ˑYcSP7#APç F%#HTY>KiˊD볈.[n}']Ft 4WE24UBM[V$(^S%ԴeiHTFJiˊDeiHTFJiˊDeihkV h{~˵+$y:A}8+[A-m!LG+s%C\! nEa#s]@zY"FФ |MGdke7 PU$JN$&ԴeEDbBM[V$ʰ({ԴeEKx]5/$a5u$G8 ʵq+˵蕟yIDYʻr҅: +ٓQӖk 5mY(:PӖk 5mY(:PӖs|Ճl" 0Hٍg!>>HAAs` xeB8Yvy?x'lIV"Q؍3U)5mY(v>kTIN>H1 br6pwdCnVKQAE|0TSCC' x+#3[!&=?s:dr >#Y)Ä[p@A.'$!;\]& aD )„H*L r ] o˘"t9R>03]RX .-81R>#ՋDglK*.)<=Oqp#::➨^, &:#`ktS.)Aّ]1X3YDlnם/A##Lbzk{Nyl;_CE%np7`OVkCvi S\˫NHb(ESR,/[iM`TPC= 5}aURWiIcbFn"Qu.-+S߭GarET =sHTbKiˊ%k<: PӖJp 5mY: PӖ%.-+Xjڲ"V'i -qDES JoJM<(@r+s$\n_HO^Lce@z.Cta:%PӖzSr 5mY:%PӖ%4DQJ (1^ b %#HcKiˊ)߿KiArG2dZS"\hnjRGPp\>91|Ď ^\Ё%#`Gn(!VPKĵ'~G"MGF\VDrfE$mX϶Y=b`[B6"pIa 7Lbgܽc2g#R0ʃȏ v1h[^ǣm}V>!kz-!pD%5Bap#ݠt{]ACnBi3nz=E+]jX.]߻x#]*-:b~9 i""AG8Q]~#*MR8-d4Vb0d[5mY(s8n QC t ĖWEdBM[V$H&^l!Դĉ-HLBiˊDĉ-'H&[5mY(8PӖZآTj@lYIkZrPMli=Y4I2K%p@O{^<HN\:~NpԥуD JRbP>|q#$6 sW2Haco`O`@A.?wT7Q19'ɭi=ҞX&Ծ8؊ "wltVNt)67x<gXfE$]7Ο./J i'PTuvw9Zq]zORDAS^PiH=|x\*N&s@aX/ED iI7".)) )h%8DIHX B( d&Ir1u+r}L[p\>Ȁ12`:3DcooFct@>"AiD u`Ol?:wWd5#T:ɓT$J:Qtn(vE2[(!=BM[VDoطGiˊVԔPId<<^.[ճ:  5mY(!:PӖ 5mY(!:PӖ 5mYe>_^0$:t tzB3/ёq]E`tg"B{ >w_#)Cq]E' ҝ.iqc2!iqL=X %8؊ brp1DrZ*"E+$-ɹՔ `mܙ/;LոCpƣ&>i\Zی)Ki\tQYȌEbcq()sUNmJ ȗWPcL;*Tw./CB KwC" "wUM i8T[4Aԥ"50QRƓ?%wthI(8DIwH͜tWwn!WR8 .U(9PӖ 5my"Bs-+8'jڲ"Qs-+8'jڲ"Qs-+]z"ݝ אҺiZ~ MBS {r+stm}tO@#WޯVw _ ~U]!۽"epu^U.zxCt蜃 ՝athEw"qw="4WE~,veE~۩'ԴeE~۩'ԴeE߾N=-+N=-OD췻zBM[V$zBM[V$|?٩ƟL"#ҶN}8i SGPv 5mYv 5mYv 5mYv 5mّTۻ?Wcp vBvSOiˊvSOiˊsו;wT:D}@D7 G} xO#@U"w!= ttJBDw*Jjڲ"Q[p-+Iiѻ<ƊHB5 KAF۲Hп?(P) rVt 6H(Q,[˛sxŽil9N*-iuCF@ "w7:]{ Y-F8D@oWv,hud^Y:Pt=ވ4F8V@MWzrC`wSt!C=d\*}y'@0L@|>]quώKHB"d'@%-AQT$*N!ԴeEBM[UCiˊDQ1HUCiˊDQ1HUCiˊSϖ2.LOirLfY#Kğ1BA_I 연/ !ȃF<8XPyATƆHgX;ՐmAN4WCvCIˆDĩ.lHNBIˆDĩ.lHNBIˆī.NBIˆDĩ.lHT1.x,l,"d{]ur#ݨ.q ډS]5iِ8ՅP ٯpK[QC)Rty2۵Nfm0]F S&PB.Lr5$DNa"Ԥ|yc+ijNɎ5ӡKc ҵ蚮5ԀXGא \ߙjm;Puu.MC;xuIEqxx.D|0Եb }[O0x]njd AP6볘91#=&t`rt0FW=Pq,d4~x9tV ,6헬/kp:OSȲ%>Po4,BgGAAD|@0 Y!:s1S2cE}9 JZt6/tcB[uW~7w]W `CW$sX)!5R>f'VLB9M۠ClVNjTWB>]cR:pJ]3:ž@.ϗSA (Y>wq}ʣPP dǘvtRKhGА(EFb`H7b#DE8RfW\dzt\ cDDTBӸe"c 
cCKѲ!Q`t$&-F/MjҲ"B`t$&-F'MjҲ!Q`t$&-F'MjҲ!Q`t$&-dz MVc&s9$Z%9~2`*7<\##\1X0L.`4M=)DD|~yN3%h @| .*'4&7(x,i::xb &w'Bta#QHe QӖB*KHRYeԴeE,-+T`5mY : QӖ'"T`5mY(˨iˊDI3GfSH *(ˑ #tEx*!%lL1Eb'#WĻ;f!ߢ,D|0x<\Ehl:DV 4WE5mY߯ۗX>JZ/eR'сKsX~EtK W-iH{"]>i_<#&R2"ӧ_{!F!KrAs@Z{^Ʈ+SNjtY'ȝD ƦH.LsUd/EU ,@99%9~ctoYnNE,~t- 8tysGjG :"0H]yB(Xىs .!*X{|Խf3 ]~D~?RXiˊ".`ԴeEb0jڲ"|Y=)8|`گ_,8"uѱ FDh Q8_iˋ5@:Z:pbk7 t=xa&G~ )o".MVdkc1=XG75mYPO="28:H'r)`]Gb+[ ԴeEb+[ ԴeEb+[ ԴeEb+[ ԴeEB/[ Դ剈W@iˊW@iˊ,[iˊDA!R~Hƹ]QӖ5k3jڲ"Q2jڲ"iJ;Y1:z]Շѡq݁I: 4WC57y|uq:ྠ4&xC,@>⬎BsjH/-wYzֲѡKsH =RJ+bNE,APmP6EcU$Aw*"e[Wo~VjDXJ +x))_LKhlrt|tkqg$rDcIB.kq0d)UkśS$TԀ4yzx's@A(4xE`^X|%WUDzr&wX(oD\t8V@KU,:IsG^|U^WՁ Iƌga)0O+$jX!.i':mx<Ҟ|JK堏@ u+$(=LŨﺹ[64ÈN*_3~-@xR:M&Ry1.u>drZMoh5/gSOiwD OJ0(=NJ-!݆fouqYѥrG&? 9`Af(; ܏+K.++XP߰Ht60,'Z,zޛSv^<|CRt_\D @)T"Ҵti66Hd8Co A;u+@M[ٗ@32]rtqnFu2ޟptᒂսff!Ļ~T͂s`x+ ~%7# /u+ [Et'B߿?ކ`eG.+@M[(د\u=#zV΁>%{Tdr3tut9Ȯ~e5\t#jڲ"jڲ"P;S\~>[rك܏q߁u8@..`>RMbZMG6K%p@O`*ap12@J)Xb!K+†{_/:`  \sbD˓!28,ՎN{"b1=XG*(p 񸨇钌:?6. p%#ܔ Iz]6{N{"P#kb9wt9#@cK=C?ŋt!:GH(`rOH,`rOH(`O<QJ5mYXJ5mYXJ5mYXJ5mYXJ5mYXJ5mY|+|%Mi -:Â4MDYt' P94WE~p޽6Y/,B6сKsT+@U݊IthEw")"4WG(bICD:ve4:=H8"\)"w$LsUd^JkVF\ 5gC@<(6b]uI4>f2]>\* \5AEb}@X2\x)پ+PA&`Q0.O װ3gGߥßL\:UzyU +w<9#'}|oLW$O/@ѕxeտDO)7R B9)HzRAdtNMѷ}ۻ!PySPѥM)T"2Idڙu46d۱5!ti =eJ6f^F]W lpUa)0_KeN׭T7ܸ1=lJѥrG ZhȦtFw]ݝ:|1Y;;3]W Z'Ujd),."BZ6ɮdLX5z\Tj>G/Sͦ]*}ifOJ!l(E]Wi((*.u%c~\Yr]q`#jbiNg+(/ZXTN[xN.6K.T(CzJ,l` Y6NkU= ˒3]%cs!|NJƴƎ>T2+4|atYNʦ]̋k:![yrhlI'eS̋b1Vށ[@)=*XL躕w[@xߣ2/t[yrhlI'eS=)XL躕w GƖ{ oF"] 5mYXDiˊ%~C-+Z<'Vd<.rtEx+R彦_(0|G׭}߁Ɩ{"S &х!?$1?2G6yǮ(yU`~r:?ʩTqɡ lXs:ڷA.?F(*OXMb MGe^+K1s@Tyݾ09HGH;GQEj5^L:ЉxOt4߽b1=XG*Ikx~ӱEG $Jԭ񸲌1oE.km,zX¢x]bɚAcK=eoBtP ޖEiˊ`VAQ nǕe.[.[(7 Pm,RRa[9rdlI'EnZDLq7#@!^G׭=4$ފp(^ 9 {PӖ{PӖ{PӖ{PӖ{PӖ{PӖ"p$1:Ut~pUd}\odE]{, :t tGKkU7u<Ut!w\[uu]ѡKc*o 8"."4WEḅ-+eRN"Zްz kL7:>_X|?>'AAD"o\)2tSk){5vC]4,R98TcMK2t݊6VX/XLfT1mRpfz"$ ]s@tɽ虐zP7_{9f6R98} 6ג)eJW.,M'}7kuOXη??_&f5XG|C%^BEA(}{z{ ׌% :KаB*ietP[ޯI}o"q%mfkTγ+ '*rt=EiOJeNՄȆ2JD8_}@U _ NWf0堧H  ԒUt0Oqx Y^WsbCzIy=pɚ T+wNUz"*Ԙt?>+gdLl X M)TDRdfe;$#hsyZ`l]q`#ҊC|::YbZV_xSN.6K.T"*i_˒ U9ƾvC-g_(qeE5L/^T(ݞw<½'/` \lqtMK2t;R4SaNяJ_+X~, 0: qt}m"O'SY><NVށ[@mmbZ`t[yrhlI'7r4tut;HFVxOjkY-0b:^G׭94$js[c,ut;#@cK="ŋt!:Gw!dx?;"'d&q]2%t"hWErv&?&q]2%),Mȁj$!!8|G($.ћ9Mu+@/J4$Y>&Ϧst@5Q,&꣟sܚ1oEXy?6~縘 wt݊%~I30^ 9rkSBM[VDT*{L 5my"XʏʪxXd#"~uT;Eta#jjڲ"jjڲ"jjڲ"jjڲ!kLyt;+)!:ttAtKϡq]EbIwDХinIQQPnpp*~5mYT~4V=$q)4% t"j}g/cLu'g%cK=)x]r:Jq>NMX2|t9$^۳w`M'u+v\xߥ Vw$8,R9($Y[`E?t%w95[[ĤAurI8e3R98%B{=/St=EyM;M '钇{Xe&d8lJѥrPW.c~-R)ȄKw0z.ݰ(MqO @U4ztekP{-)YW8׌s V W%[GByҋ'[uJɁ;-J B98^pqXLm=nnq2L+b-3y.SKts߳{n qɼXR98oU]ꅋ+Ēet}d)1L]GZ99Ȗ)B6}<4+P톮J_yݟ -J B9G7t腋+tt}""6r.SRq='c}>y zfk]* . d1]O@jP)d1LÞlmHW 'yXH`6?3@vޡ@/!+lE߀l8:YҒ]%T xÏo2гf]*z;>[lD9ft*,J1}nq={lХrp& 9hk!]OQDZmR`>WXް%w)G5,e.s jp)0EQ g98o8!p3Dj s@t6Oxg·X/yR@$R9)J/3tP0]".Kщ@d{6Q 8 qt݊$#@cK=(_}gMx]bu1}}!D o)HX2|t?k:![1ŠSt"cdLh;X)YtB]b/&!=h;(Ky~&u+v3IxOib:^G׭A~6 ċt!:G{?Q3c=SVVM3=|G׭yƖ{"Ј;}$0:n$ؒxOFt/҅ Mz_q1oEc?!^OљVށ|BcK=h O0οV4tut9лzrw='sr~?eehj2 q7B\u}_51).==k da t~ʗwDƓ2[:@㭈Ӛ,::@zY!:41NgNXL;?vWZ/ֳ.I/ bzR9wB[p9J qt݊zw~SA#]E=8m*NWf(L:uhɚa>iKuE+Ćs{MX)& -d׮q YfuΤŏT$OBEIA(gɻ+B,YLW@C[^B,YL'9"\˔__y * tr8(L,Jr-ԛ3;b2ݙrk+ĒEtWt)ӋIHk&2%:-/)ݟWv(^ޖoKp-l@ѝ7L%۸UOA( .KВUt笼Aݗ%$ 6 72qQZr0- ָ@ )_e^r|O.nƯ@ѝ'_hJT;\aEA(y*b}[BѝӾ?9}R]/3&z A(:X^ FtenxuՖ,pbW\,9 fɒgz fgi78HН#XwZH^'K4`M\*瑱 b6Mgu+KRTh[G}% a"VAHFg:!6 bt*aҰ2K)љNHEI`}Ą8sLߏ-|~Pl.3@_9 _JW7&9a#t~]RB0: i:3bz#мXV`M'u+ M{"IXOFb\Ӧ#4 ]"tBDܞ\tq# Y .;Ho~ oE;bֻu+v>TucV4s<{U  r-rj|ҡJx%.[,GhAVAW1ssh"лKx]r<܎7N@ l|VS`) &LhuIoBNQ49)\!x>Q ^O݋OE.QVC=B17 ѝV뢚!1N4! !;`3VBԪI÷$L׭=&46oCm5[KCt# 2?hH%c\EtzL.[WѠ얢 ut@㧼Et[>iH%cL]2@Y. 
utnjڲ"ߟS51:]Aw WEw)=>j$c\wtI/A6d-Q E.D4oOBM[V=ҘtTM j %\͛PӖOܗCLAt(ѭdDWؼtg"y?pQӖ͛PӖ m\-OD44rX*?(]nA0vCȥryJQ:^n+Ь(ύm~%Tx!3NA(9}hV4f;75TnG0wCSOiw|6`b~-d;6O=_KDu>vtN2{qx-@CWE0wouEw&mts;`\ɢ yy> W%9}c^B,YL'Y ~F2et?d|ٮ"ڊ`ClT覫(?x +.dSAAr/]f} Zr09+0ɵ/AKV=IHkh(-=vכGkuJe ~2}ض@ѝh_Pށ5Vrp_hMNp[Bd*ށ>p[p$ d=\PtpdmrhDkEo_W{jaeS<' 6'G. W\fjG_ΖSLw^ːQقKEw~g۲1قKELDZv3L)ӥuJ/"r=%-ptϋ]fɒgsWq]Zfg ɒfg>2֟K`{'KH~.ktmq{}$ޗ3 bՁ;F/orQӜI,q A(<'l]Y^ 7   2:/\t6.RL7ؗ/!D W[ѥrp.Of %~B@,YLwJiq%hg>ioKْGˬb()téT)YXZbt!]ؿW$.w9:+$G(6HKVN`N[VŖۋL@K ӝ]Io'В5tgʡUhA>P52]E a~MOiEv-$c~-R;OCSd̯%St=Ey԰]ftLw*tMɔb6MO TΥ{%.$c~-R)&Іt98 L5!Y[q;^:ngNZ)3`Sz8>E H䅕aä,ELw._ݾJ!-[v`M\*ǛyzKٴ$LwY6ސ".Kщ`VdmHFgIBAim2E'tJ}qԧ3`SJ0E% YsiƭI.IFg:!ҹ趏zTi$麕h"9rplcL׭?x+-gdj2l2]R_ #@cV[Q&bIL׭9t~&#` iѝm=Laokr҄Ts;!8|Gw!x<R}Ą2e%[2c~hrBut(S|fJư҉% r]P.ttK ]&Mi-1ފ`hhC{TcLwZ&'}d.IFg:!X&GRZHi;HƄƦ{T#&/u'=gtWʀ~ЩІ']h ґ{ @ 5ྦ fđt'@E@\@]ccY264 t1ފ^XLuDmQӖ߫I)ct~"" JV. uq'Md)c t1ކlG]68-OLStKb)XL]W9D&S2%c t1ފL7a,:Ao.!E[k1EtzL.ې߹5l`]GD(LPӖ jh5 9K*R:O5`e#j*ppU$6UBM[VTZ#9p݇D F%\mRHlu HlқճtX?Cz8 M1:rtv9:X-ZgZ̗gKt#(pz1Q A9Ч㖐zT0$8,R9s"GmEw^O\q5CA(=EӔZN7} ]EDw;c\!ɢ 3ikN4)9HmԾ%2edL4yջ.͸ZEwa9gkfɥ;SkK0YrpN6%Iu$9VpQZr0~spD|k/_%*- S.e縟(Arp.){o~ l//g .]ψ>q B:f1t:bD{eso!\EQ89pgGdO G;\)sV\Zl)>`/Ό^cj_q"b[?daKz㕒!,gg>(sV7=$99(J֍_:,) @RIȧn1ŖFkKctHǟԠ%prMW~:Pt;'9;0༗* כq9[BSyrRI_N)\\*2evL9:Y,wN\TezhF}]Bpq f]*纊I.AK|Z+q!q Z\WAj}tտ>cW +6kR7_ w5nwκt~Ď;rV=T+>eRO&l\-YLwN{N=>ሴVᑁk܆ǁKVы*GjC1N5΋7'В5t畴~Uhɚa>u+ĆIZ2'ݛtqd̯)utgd1LL38L׭Tt%?Ɛ=a dե.08|K2tgW&#]RsЇoIOF˽OXA;it%g­]z."'ݪ\2;h_g=MK2tʏ IQΒtȏoHFg:!g̺9I8 anG=:Kr S{>k,I]#PȄ& ;9:"tB*=GУ]% anx+-G'cL׭946oEs4=頥X,麕sp,K7+m˼K&$eeB0'yc}`-Iu+ K-2E2O¤)XLuv粆`-Iu+ 9x+=G`Œx[yɹ"+}ax8b2]A76oE~+Ujy]bFd2J[x+=eSqL׭W^t>X7[tCthyKWh}'L8VV[HnѕLo>u+@웾5[,n#`nѝ_>i%cgtBj|W^eBw.7[.8|G-E mRBb~[]]);E!V7~cJ_zb/x\#W@O`Ar\[aEׁ*Ccma߲D6퉼W=%:?TOlz&`1fNxy+Z$GX=jҲ!t1:Aw!ʲ1:Aw!o4{ҵ d i/Glt.ds'lDe@Ё源g:9`ekrRN:9kAD;Րi5GUNΤ=%ұA6س kݢXGאtg!ujҲ!ujҲ!Aek- ͑xb1/;Ϧp]}徫|u8GM'tx#ru>8Rzh+w0~1E'R\>ier᪷;zxI\\*swwLRIHk-z@sȸD\%\_wZzٻƆQU|k=yVrp< 6;O 5n,f{T偆ttd|1G9]Lqd@'&UBL.SOwNzu8K%i_5Wz(u(Y=L;DԶ|jY)ߐu-ztYBХrpsIڰp-Y3LwzKQMVN%kD"G'M^ r9&ӝVSl!ՓdJ|bꠕ)enShԾdD>}Ƨ)нFtt^'/fsЇo2FsbƗrS7$!';ێRLSxq$麕s^}Q%D`,&nCT afLu+@ Ml;g%2]rzb geu}؛u+@?%B` twA76oC[Q,t9nxnsy}ԣ9Œx[9zhl2ކpsڲ޼Oŋtao^76oCDX@Z݃1q^2%c1oCSS6Dؓ:2*{:=Omv񌢳dL׭*ݍMېUN׳3d[9E߆TZ;= bv;a>KV=x]RGg%gnx q8lzjޱ'ud|Glbt$D>S/$t@ZZ^u-/u+v MېgUkd\2zn>>t)4$Y$^y@YyrHe~^Gxэ|=wh>;g4iv>C׃N42y\1@^ɇ\~u!`l .|T_ԃN464I t"ފLDbz#-RA'cn]!=x 5)0XGl [U]a 3c1oEY{_`Mх::@mm)Wuֺeh)2bY7oXLr_uz_orfי1b`0[֝JbDFl &%Jg$Mu}N[u۹^n^a8s0,3|~lIבϵb9G`  r+۔~"P*Pn+]sbLvcgh(Pn+맬v1dE F@@w\p.=@=hDt <}Wn6˲pkv(*;* O%]E":8E|HsH:; {Yg'6A!%]:@x;l:%bfwb](v6*w*>%]!}ZX[$ 9zEOZ.iRҥرL.ٜEz .hu9:!8>{IWซMe˛?aSe!9P6y龉nȹo;4ɟnVdc?Znwߜn߳鎦ÝA$\i[f)ixt} r~0tt\=.taGGmگN!W8188q}5_xpcpplG 6æH{|{dHڢ>/g瀆%q䛎TtER5S fcD^A-qlt[d};ۍwi9]*08󊞑)_L'J:@`{s{vtq0EFeI|[۸ϥf.KU ' ձ]IHF*>@Nlj8?mY a rxvTyJI? @`vvэe1a[Rc@p}_%|9]*FWo"QZO1t=#|Z If5zT)0z&>ԼĀ/Q쬺|?t [R`t-1{ kNzL'JR6jm?K7SI|}TD+] zT)G&.&#k·Qhǀ. tdX@tvg34Hbؐ/:\ӆ;~IYn[ÏA9]08o~Tҥ 1ȋ?63^JzCȍ#~AHpಃpo OcNgőd ! \qɹ*$tPR;ͫBKw)wYҥ QmԚhxԏN4 [^uG98 \uö́-WSy];fD`#l{KwKJipOߕ·i v;↥w91p}׭1ڎ{Q>-AGD@o"mҹ)H \N}$ۖSM)t,2w3cc0f剫;}j?g8ɜ4_6>tj; 7púd2 "|KU ( Dt?]I6}k٩ |ΏI)DslIHۨQ<z jW ԿJۏǙWP0l7${ K( 눚_𑉰~RsN vSB羸Mfs_ 7$7 FPMwGtMDhhD5&LDhhDI􊆏LD=hhDf튆<!?1yɻMSsM7:J:?bLb0|NףJ? 
7p _^s5#|KU S d/v% _vEG&j]19S\H:sM!-:-dghryz F/F0lQ vd=kjmspI)o"%kfv2ؒ#Ɲ̾fBG&CNܼ35G`]1I:;~%]G~f46iy(x:/4ȍ$3=t:nNgsN7踤|yN-~^A|)"T W"ۏ+v ƞ-7ȕ87d0^w9ϝ%ݨ%yfte%]GОFa{]Ns7I)D~#|م~Iף=4bb (_NףK~vҾ.?M!ݡ}?%+GHs/4 Qc_xL`ސw..o5$#-WfC%g+,9dJ79}>|dxgMw} t?vE}JGQ {fG31=8as5{bAmRHρAx&NJV^NK>Z@0E 7|g7TrxR:}LCw +6|r\dfc c:Ht0?HI/Ó੯dw 8G#w| ;OM\8KxPy$ ޤzE]z;gۂWQ5 U)om.v*It~fѣKȟ~c]x)" in,tVLvϿiv? ^3!\Wfcڷv5 쉆GDC ɝovSy3[{5}1VNզQK*KťS r?+1?p9p&wtq{[c6B%]k tp-WFL̏+N7ސ!nj՟L`w ܌^銆L\ y2c* r%Fy{  r%s<|~5`,^@w\W! \&x) V,Pn+17g>@ݭ C C𑉨1XJ C𑉨1XJ C_NCKF&"j)%4|fC(z/525K#Q! +>2 f|+H K:oٔt7n`w}KU +ǀlM3[2UϮW|n .kwY X\ ;3$aB@%16"]%Mg&>f3q ?ؒ# .~7ȕLFx)  tG@7ȕx6d;lGsv|ÙtVҕty2ϊkqƒ&7ȕZs9 X ׁ\w{_kS'"`{[N6_һ0} LtI#HlɚK:@樓I9. {8ݰ%Mt rGN$F-t_ǯÍdp"~}ݙ tE.M2r-ܚ %G3"4ߕjܝ^J_E~ƻQiݶgO(+N7{?s)r%w WOպfQS::+wͼTrx\Q:'JvoE!ݐsn%'d|JT| FL8<^Q H@l<|%]:8~6=ĸdQɍ:ܜ_˅> tA1hkiS?gB¥$]pC!$\8^dqRI'mG17-fX*rDžlo{kS(r; y@$\:Ʉ53nrf`:+$_ypvEpƋ RҥH3̈́VRKnHL XCo7cQO=x:5.ʃ&ym.=Ĥv}ONYKg@Kд9[mL)њ8Z$#[QԴOmc)0\)1Ʃb"4|d"My3n{JnJJ%!])p 4z٥kGv9^y@ܭ]KBbrHҕ V5 6{ٔZ %߅ՐW_ίf;.m7}Cr14D2tLmCv6e#vF4EdkZ5>@5 Zӊ𑉨5 Zӊ𑉨5 Zӊ𑉨5 XӪbihj_ǞtkHwm|靂T9<;* <Fp ۟z?ko$.t=A!/m3+:rލ_cHhDL>>XڣVuזqdqeN>} rDŽa@o1`ذ0#4|d"2/#4|dCE^rQ0P"X W"`YƚmMsZI! D>_e'.[umi Ԩ\uS羷ΏI)D; 0~N[j`qD &.y( k14^ts[+0U$ w@~% `>=囪-zT)`D1{ ?\t®vw+E 83|pmFnͼQ\`0tچ 8hr+#h`!%]*c~Aۢ6OŦMCv1h#0gMÒq:~ .vu9yfKZrz`J+i>s[+M2nQŵ=[퇓{uo\_kpᔎ" 5oWrt\q|ȋ] oәF\EEqzᔎ" rEoJ+JXzkb)ruͭM{'Rҥ* ׽dv 3"Ì18"+@tM8;F!]%6tlWwyV}JmV^6K  qe&1smvYU@ &}bI5!O=v3Lb"lH }?gL+9І# o3=%kobfvwBE ?XI@Rr 6!f<9o}o@Dh(Y.|W߿6ߟL%1X0|qE-ߗ׮:aT HWJ.6j9۬SvW"j],&4|d"b]GBG.##Qb1=>2.##Qb1=>2.##Qb1=>2.##b5=>@p 5।ӣQ%Ĭ̶G_1߹L"38`z;5**%߅ϕZ<1rDq,_܋3TO![$̽x&xn= %a?@\<`ذ0b$4|d"IBG&tf-~SYN)߆<\2{B:lIY|W3T4|d"`,)xnF,I:ې}{"a:lIp(>cƳbt7ׂ9lI>dl39HN7踤S|Ǻ`K:@ps&%['O-5︤S{p!,/-bY\uRY! iW4DKoFlN=5oIףJPbpx_ =$UϮ[ +ә Wl<@r:?&M|:8nγb Z7$=kkG )M Q\`"d!xƒŠ! Wz ܬiXr"NE9?/Bk] `JwOԗKjKUdђ/4;ߞZ6U@A^o>R57s>wJeC>rη'M]~/U.ݏ"?'pp-W ^^rxq:P:-[mIJiZ[{BfI@5})EDN;3'Xr|\q:kKv{׶ [ipm?7Nw uI[QtD¥iOg[NcpDR= ;O}Pr|R:Kv[Yt^8Eu9lKTҥ_H:<G$p`u[%hIpZ"^8 --ZP*r3[,6 6𑉨o8> 6𑉈o:> y SYLhDXLhDgXLhDUX^HhDUXLd;^G`,L+~ >~y"as/'q1_A P_<`܀GHhD X|LLms_1ҭ(x+'oC^hN[uvO#A{6r%S N[ug(ڊ돂+)5M_sK#4%Or%oaCyNt?ge蛸JCj9/ Mr&'n pwxڊ돂ΏI)D_S s#j{1r%o ߐDԊnآWRmjQCc_NףJ"bx|xx_KiHIG#q" jC8]*F-bzK~ߚhvj>dC>B^ǾT}~J)Œo[1x &r̭`"M2ĭ㟟%GD~˓E*R{jp-W;mcPpJ pwi=W"U4|d"/gU7ΩooÒ ,ץMqg5d;|\raǟ |#.1 DŽ\xؔ݁U?='SAkFb8|LףVW"A7KEG&r/R d9KBR`n}ۖzRW=_L7{ XK.=4#C3cpxv P]\Z s7K ~'MfKxDbR`6fq/D>2i/D>2uL/D>2uL#QDZ>2uL#QDZ>2qLT#8&ׂ𑉨cx- :&ׂ yKs@w)aILίkU QDŽAdEF5|ݬ{-߯wEb(^ Å% BÅ%@ϮQBQͽgʢ!9uPʶFvD}qAhȆo8. 
Ma#z BF6 }XViqZ#0nԢAj۟+Іp>Ebiӹ?m!(?s"\#0utNM]1}o1D: A\jq= E~O 8(  t7׿ T= 9]*08<A{;+r=s ϷEvrn-ٕt~Lh K%ݨ?%M`E8gv1>Qo",M$p>J/Q?wב{VTl~T#PǬilY!QwvQg , t=M3N;k#.W'HSBΪsI˜ʪH BDcjj8_QG#G"h}ӱAgWDvxkPM\>vE̽y肵's\:P+B46d0D6>tBpV( ݮ D-ĽnQҥ-誰N2 z!6C?dOY8MޘK]I&Z~E.w7dܴNH]Wz*gG{]Tٗ=9 'u8DTeM𑉈2Z4UFGQFYSe4|d"hDnT.jQw;W4|d"QhDTdupAlr6h7{9ʗHL YP7ub4JV7 jn2>2(Yd4|d"QhDTdu<~r;$aIHW =v(/_÷}\ra*=Hg0 7,%Foӏ|18<1 T]\2ʛ 𑉨F/g4|dC>]z2ey%aILuHSkT/D (RJN@S~ 8Z4UÆESE0lehLhY𑉀y/\F,Z)\<4,dƢE  (ES%4|d"M𑉨2Z4UBG&%vf4|d"3>@І2`EG&s4Bۊ*Hsݮ$I|AY/5}{.t=#|AY_tT:Go"}ct=jELvu߯ Xˆ)ew:YENݸq7vu羮G\W Α={VH"[ 齿]O:#@"M'8ƛ:duAbwnZ!k\* FO\oQOjSkcI 0D]McM4\O ]UC@ zDFS-VK*4Y{:Dk\A|Vg"bAnz p>zQ 5_۹A#Pi̴6OVgɓ 5u⧓L$@oO'odUy߃sUHL ؅ ~ P9 - iĤTR7Պ GhDDV#jE#4|d"*A𑉠OI5nz `\V+K:ې׹D\눊yE$4|d"*𑉨Wd@BG&bd0?ݙՐ*v 2%3!So1ȀLDż">2 HhDT+2 #y)##08tXҕ3]0x@T3>W"*MFG&"Wٻh1ߘ𑉨U1BG&W ِw/9~<tXq' rCv!kxv>W" ev# h\EC0lU1Æ4FhDT*#Qil#4|d"($p#-ɋn.JF*D@H?"gQKd*b@O><FJ eEf#4|d"*𑉈XVe6BGˊFhDT*  HGU4|d"*2=F'vlWLRdj5)w#^A}LS^9ZM"?(lF~TyV `GTC0ȭ{NF%J:|>} ρTŪ"1+[>cC M8WPnqold"Ek|Ow0Db\MgofmZ ~Xk.=ʵ;N$-oM$g@cRIF\YxxH[_h>s@$&H.ΆAK3 m*Z&E 4|d"_BGW d }𑉘r@hD~uT tÒX,CvMU~.|DthDtLDV>@h1E 4|d"_BG&=en[~LaIHW _yQ{o7*"X]\*?𑉀>Q+ B v_ÆY/#U>2]/#ѕEx]G4|d"}թY/kwHe|܏k_"{TE+<>O³(=䢿/wuT!<b@@1kKXUrŀŁp¼0(=䢿=}@0(=䢿Q%CyHP2#c0K.F*/y!GIɕA"*i VAgxa(XJCvMP:c 4`ws%My#QH;LD">2hCh1Mv  4E!4|d"m|SV=O7<OJ:t7O_um{ ?G6Pp 6[7oIףVG1{ 2ݿoh|K"cjͿouG a zT vF*2#Q1XLDŨ"c>2EhDT*2#Ql!4|d"*T𑉈P##Ĉ+UA4|d"*T)𑉨QDG&C, e#e!_ Ez.p~DAIPeA"l"3dkPW ,dK$fw$b ts%emK܏~/-q7]`&*?/!_HG%]jgR :Dk\/qgv?Ef WT̥>ڢ#NKyi7%g oŦ!m nߞK=]f9 6>DzsA"Tk""1)`z ϕ%yEDbR.l%b>YxmH"PHLJNq|{O]|eɴ1aɅ4,i .i𑉘q~}# yQ?,S|u%~Air֕te%]GtHLDW`~CR/QfAQIWY$(x4/bi Ēp5 囈 E 4|d""ޚ5&ZtϗKQ7J:O?2 AhDTV(#QYLDd*H>@P BG&B$ .wQ>2]jgZ .zMLy}Bo~jȲtYF0lȲtYF0l5鲚MhDtMf>2].لLDפj6#5鲚MhDtMf>2].لLD֤j6#ĬIlBG&ke5tY&4|d"&]V IlBG&+ެ䢏 ZʗHL Lj ]^Yr n"j%n%. Җq5[[D$foO:ݲ muxaEDb\/tn׋W:li9ܮtTZTe5.WR5xe%,kC{flH/IHyiy*$fKS{m-6,`)$Zc3C]砐=km5{A?`[CM_@I5#a vBbAQ)L3+-*%gk'([8>k`DbRr#tG/FdWg4MTJ. ѵ>2]/*#ѵ>2 ~g\@D`{_e5{ tt7\gu~rb3|NףT 7YU7c *r @̪z6#UOhDtACs otLtʷ!yr a:lI]U/񄆏LDWz<#UOhDdU>@̪zY'4|d"^ xBG&e=zY'4|d"_VgkLͿ_Z_^ v^ vfU>2]U/񄆏LDWz<#UOhDtU>2]U/񄆏LDWz<#UOh1e=zY'4|d"^ xBG&e=zgߒݖϯHbRr($]/ʷi:Rr}otn5"H ~%XZn .kCHb\rIce/( aF=$%֌m̊6J_r[_X2֐;,$$ӮsPH̖񷶴̫s:.Mv~#(NIyi9($fKemiWl{\A!% 'vBbvuZ[.m _DZc3LA!1[H2Q6hDEGQ6&BhDT(#Qeh"LĈl'4|d"(@Q4BG&j"o}@q_ҖXAó@SJc#HEx#GoRRet=#|)5j]ʗ`Z纯qu"Z a UѺ vF*Z#QŪh]LDu>2UEhDT*Z#QŪh]LD7k]LDu>@bU.BG&UѺ *VE"4|d"X𑉨bU.BG&jj] lN?wdPI E\wQcRDbRNjIdiЬ!4AC Yən#tlGrpQJtLL~?4'?šʹE ĸW>`=!4ACgaK|..޳BϮ!KK"1q%b$foko : ?dŎJ"Sudu k Ko : ?֐GH댄̓LA!1]~vKzA7|DZw %֘`du i!eϳyrFmQ)9{T}cKtBĤ .K^!h E񣔦JɅQ`Ex%3f(D$]|lQ2PRc*%F:KDՒ>2UK¹4|d"TbԒ>2UKBhDT-): #Q,LDI𑉠pیаP\("o"(f|tG2E|uG~o"(|dCPQG~ F 7 d))_NFs77rNh,zԒuғMD!E<0&T#t3-&囈*Ce#Ql4"4|d"ww~<"ٳ:6J%12g{ˮ$ 2MQw 4EA0\XF)a#MQw hChȆFSBF6D56!uа Q;lh4U!4ld"F)a#MQw hChȆFSBF6D56!uа QdAy}Čr~-G5`S}7MJ~.tbF"QjܘL|o *ʍ!XIJԝ{Mh˴!8~#xߒR)#aXgEVI`"fmQ1_gE[TKNtǟ|y#2mHWnt ĸ_|֖Y (8rI%֘mΊk-oGmmADZakȳVXiT]砒ǿmMG`%dqo6v_$:#!T]砒-oKbFZ|DZam/F`T'- ~M3Hy'٨i<ڐD>߾/Z_3iICu^Ԩ$ҚMÒa[TIaP*O9І4%F++porLCT)[~ܘ@ F%'Kj&5j֐.n` 0II(H|æ|3C-Ԧ1qɅtM5Dų"6!"`.;mjc?!g;r);Nޥ=e%]_)9vZHn Rc*oMףj; ٮt/at2Xb4|d"*(hDTPb)𑉨R#AHQ<#(hDTPb)𑉨R#QA(FG&KQLD% =(EU4|d"*/8Y h Tfr G HI/!1Dcԁ.UߘD%Ĭ] ~xvmx"XiHWf4IL[ nOgL&s I 3WfYߘ6>@I5#aO=,⦅+Ko t>o<DZc~t0A{7l{_˯6DZ$<ɴ㳤}cqonvՉ tg%eVd@BE!n, V@[G ,YH̚Fb",9]K%KDP&G=F3t_D%֘*3i!ٵ>ȶQfE[TJVt.W46(@$&%'kR8MJ~ v u>JmZOiH*f"tDbR sn0DTaQ𑉈& ݿzT( Yə#tYr(>dA&2M1 #G%ro 9*4W"*(hDTaQ𑉨â#QiE!FG&"N K;kĞYJR:.p~} )JٽvbqD\ `X t&PvS_#*M pɅ7ľ)_ . 
%߅ϕWź-Ò ]W@;芮`ذz ;]𑉨:Wt=BG&\ sE#4|d"]𑉨:Wt=BG&\ sU#4|uzLDչ>2U犮GhDT+#QuzLDչ>2U犮GhDD'[*d+6x@JNbRDtYrݷv"ZZQF,M1 #O%bDDf$ZӔ6 K Yɑh,y{${䭔ifL\r\7-K KTӆ⥃M qɁposV?<6{HkFž2+iLb呹` ҠMv{:>s`du yHmkϑ`Ҡi2ڐD@)9S/VxE(ϴ!)%)4*1Rɮ#t1{ XK.%B%J}vHҕb,ٶoϠaED0lY4LÆQ"IhDT,&#Q%hLDȢa>2U"IhDT,&#Q%hLDȪa>@Y4LBG&Jd0 *E$4|d"D 𑉨Y4LBG&Jd0 *E$4|d"Yb&JN4u.;+`dE* {+%zv[81ߞ/=(PJ.oU^WwVJ2JÒXh4}t4Uuݕ6&%!; a߿ EG0l_{Æ|OhDT/=#QLDE">2|OhDT/=#QLDD*>@_{BG&"| E'4|d"*𑉨_{BG&"| E'4|d""Wbr+>2qoEG&bSI{<.k nї'J(W+D, .K޷;?FD-;p(ifL\r*=~%˓\M YI;>Vr1rjCcL՗Fr*˿ӆ||DkFžX2-B'ioHxvӿkXڨbZ%gmȿӆ=,@$%I&=:;{qv%֘ԓY1Mc'ϿkMzduv b5dDZ$+nIL^۳:.VJ⬤̊Lv<(PHZQ~>ɂi2ڢ>]d!1x`Kir{_O;8d4DkL AǙT#y]Rp̕vh h~.| #\n7V#t1{ P\hw# Xŀ@Rra+jE37dJɅf|_QfGb@@)0P-c|m?L뽖D5ÒXh[ y/=8`Up RK #PX.0z`}I.y>8@l;t6o]MR-/ÒXh >bp޿_0J"\r,0jȁ@Bb^3(fy #t1{ XK.^58[aokߣ/=NZ7!a{$JoD(~Sn@@)9;ZxKa 5@0l@>2u #Q@>2u #Q@>2u #Q@>2q T#8׀𑉨cx  :׀𑉨cx  :׀𑉨cx  :׀𑉈cz  y 1PLDk@hDTGAEG&yo}?D"O~PWv@"1)9Rg;㱿m Fv;Jm\.8_D}tI/.(@$f%Ax*{ók#ԓKISOf4IL[ *kai2ڐL=֖Yg A=\|/DkFžzX2-bG߾첤ײ=ñ-WFzI5&I8)^I{p(%g[C5iC@7cm"PhMÒ1h-lsxɩ/V:d| BzIuFBmv3L|5[춻?/<8LeLC ϭ$VwPuPHKZbАLb`C}\nJڃ́iQQd!1mayPֆl7[I#Wܶ`JɅ|#$⪙`}ם@V.t.| #I[n®0D\52֋(OhDTZ/<#Qi򄆏LD*>@^DyBG&z JE'4|d"*Q𑉨^DyBG&z JE'4|d""WQb">2֋(OhDTZ/<#QELD啯\?\ɯ%J.(@$&%N|ç˒n'D$"QjtL@0Wv,e8u.H"%O2DbV7|}uy-̐Jmv,ʘ±6(@$%v| ,ӱ!ʮ೿(If$!M+$Ο`E,N4(9І$J~i{,N?>K1AӰd¯f$l;[Î`} ӒLI֎' ekj}ZFt4M@[#x',YHL·\hhڐ-9=1]'J:ǒYiC@o Ңgݐ 9*$6$!`lQP@Rra6}óKdOUw$ܚ@-棞Ͷ?Q?Q`ذ ;#𑉨L_~BG&2} E'4|d"*𑉨L_~BG&2} U'4|LDe">2OhDT/?#QLDe">2OhDD?#E'4|d"*𑉨L_~BG&2} J(W4|d"*T|%ޓ_^ѕ]"XILJ |_$-$P|DDcGPJN߮_ sE!` Y(@}ҝtL"A"XKN%/%uiCPF7r?L%֌m ߢBqZ^댎\|4mH"XKNή_pEe{ewtO2DZ3iX2| Ej{Ji2ڐD>aIu!r qA@$ĹRC-R¯ktEmF%ĴiX |JɅ yOFuE. p9jod k:Lo"XK΄v,O"?i",!`&qҢސ^kpӴ:ڢNA0]$BX 4f~ !Wn_< ߐDX83PʼCo|𱻼\5`kTME* E 4{91f}}t`.0Tae?龳.#t&PmD.mª.; stlUou@=k̋`ذȼ ;#𑉨X[d^BG&bmy E%4|d"*𑉨X[d^BG&bmy U%4|ȼLD">2kKhDT-2/#QȼLD">2kKhDD2/#ĈE%4|d"*𑉨X[d^BG&bmy E%4|d"*[𑉨[+>2#.H׿sI:eMe@{;J:$xlcJ21ۅItzbUśQP%ĬF >+hUC;&i (PJN2߯ѮV,M,HkF5JƠEL|ZuKjM9І$ft()">)]2g0|\r8h4M sĬmߐD@!1m6$|; #cRQ@[F(TI̚Fb".y"!kUr;\E.\ra()d8ɽZFM pɅ*|60]x`.0c_2"jFӅ #@JvMs=xM pɅhxb#t&W7;L%!])"Ɇ HxfE)\6@XPP<;Ter4}LP.y>RN7o"\%guB5jL?2)RI,$ ])*fnթLU<0}Xv3| pɅ*|_Lu ֋^R&P'm:ٔ/gj`ْvuE]K@䭢!.,*`UT1BF6D6!mUа Qmblj[E#4ldCT*a#VQ Ѷ*FhDUT1BF6D6!mUа Qmblj[E#4ldCT*a#VQ Ѷ*FhDUT1BF6D6!mUа Qmblj[E#4ldCT*a#VQ ՝PhȆSw{mI)b'e@lAMN*Sm!@-0X)_W.mCTjdL\ # #k_v˺kHr&|4ĬxYF[ZR;"AC\;P7#ta6%F |.괆kˌ `\rLܞVL҅l"TIJ {|_9Z8HG,%Fڈ}֯Ybw#uG6 pɅ6|HqL ˷!XK.د{Fkp4sʷ!XK.{w8}ӌB{oC\j##M99o{pXv y=wsRA {H%Wt|[H"]It}xN#t1E<I6:qHӅ,%FbƐo'|wk.+%߅]Ga#rnmKZ?XէFKtw,%)_L7{ XK߭>i_?qlU:?-6埏Siײ[ CYH]W1u4@ ')?sѸ7nK8H|4yo38 j\9^]Cjӯiû-Q_/s=|~x~.Z4x/KI_xehfa\'uh/8pmM^M]Zӗ'ߖ~`11'M*ҿcݍπ='|>j:u#j}^Љ ۄfxoA~#-x!\oI}X%tuxw8 Ox>'"m#iS6%yjkGwRoA}}hCbf|ߊwj\Ik#B hVìFװ-Pώep_* DzH0Ŭ}'4\}a5I&ߩBPfx>tZC}P33Rum4 3<'=JbMc6ø[#KӅxEMa2+D/ƴ5')lvИ)|0=98I/&>He̕4\(?&~(m/9R\>ТWty-3,MZ {ɎuIGjzo\ID*}+r: (#Գ' =l;/J:JFp$1{Ev *%;f,,LV}ܡH{\&59YJ'ֳd{̲ n/_޿{>n?~zl-endstream endobj 629 0 obj << /Filter /FlateDecode /Length 714 >> stream xڭUKo@WX=٢"@*"@(D\(uB&n4Jğgfw倢;fY8 2LLq'`e2RZoT 愼bAsqtG@>idŌ"DܸJnIaqΨ4~=g[F:s~{/20Vfk7Y#݋3U}m)! 
Nޣn%ݹG3-gWB}'<Bq<[ߖT`G3f` /58Sx~שO37Qt;4j` &ΞRwJl2IV͜Z%y8IWуQ3q(XuE&im/a|t]FŲba7)ye5rAQ˕nz H=s[~;׾̓|@j|ܛ'>d > Jc =Dq5/zFxx#aendstream endobj 630 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /FlateDecode /Height 106 /Width 75 /Length 3716 >> stream x[wezxK7n-[lBcq_!ٰa>0;vK;kdff1/׬Yޣh< .-1  S)Dv ?Bؾ[d^}U`+6 +eՂb Fт ޽{WO8NNTƨ1W&)("$Rɗ+2\g/__pa%U5 ,BY:ˆ٘9r6?BTtJk"%]`ktLy+o&%D ;)Ӑ- \'& h66Zkqx9ONӳ׷WFxM'@ɫ㪛IocZ[GUleWƵ:yE7kʖذH84?zqi)'ARduDZi7Aia[noa|C%=o6F̲!N'+{.:$#*騥jtV7n\g4\eo8gva"3=C&nUQ95JmK0ҙUe'Cn%Z+:R?TF%]iKѢU(qFG򈋂 a9;N͐I3wݱmh!6:}UɑU>:3C"]T=Y'`4q¢{5**j%kQ7Ah"Hǹ+L_bFki݀ ' EtFUə0\e8)w4"㎞d #thi);6Pɼ@pJ@6tӒ:-i'MAhYϖP8i') Y1W3~YHnC~ !w'簝WZqQD]\;2%Ub.ױeJZJ2ۿc X̝KP*4$Kj)opgaTOt\8#c$& 78ƴ2ظ:&dCٔRJLM3и=h*  zemxBϛNaɕ&5:G%HnO:6>EmQQRɕa4RXG 䤰O*fRJIAYQp.e͞JZ d iXIpwbnR4r---J`` OϤc,M)Q0tzg:v <Og ^2?Gs=W25*$ hD7uO7*I,(ċG[;6f#Gڹ#1^N*[&1(/S̙XR a!=-.}'ӔeZ(UǺfV8Jyu`E)|_eNɫa$Y2ZT6u+m"2SEC5>=_@cXqvAvD7vzoEhRU *ICagq evIt>JQ_$ gBdA oDZźKX%659CG!L*wYS-` פHiFMm*%X%X/"w-^SS m\+jzV4Й^|zDAZzQ~`j8PeղW߳ͭ>ONԪF',Mz !KDx#?oON~> stream x]s]㾐7pIݙfIL2$y8SD[U pRRt<yb߀Ō?1:l= x噠%/K_ og무WofK<j!𧿆?nX*ßO@{ᛞq{yW_JJ&0P9˫X#ia TV̄`1@b(&bi8\_V{s-܇-J/7*`?gпD9- u(-G2(jۯ/IbcH19nD ?ge!e{V,9()<2C=@v v 8Ĵ{ 57. $ tT+䧂H1~#=XA0m!JWj@;)Q& &$?uK1&-S݀CsBguNu LHn;+0.֢2{@#+4N>-y 洳m$.EQ˔S`$+if7SuE%@%]^B/ΏPk&jIRߒ)`5<gN SڙNiZ`7E/4"I! q* nD8x^w&+u$ 6%!j7' l5pKXcfV۴'h>?Q@Q4 `@⏃J|(lfXٚVv{&䙄ZEzyrЛ, }7"]E \_!jQߛ2F (uP*2xGjB@Zf#Cł1b.ߴ$V^FxR6kx ͞`X*esWb,Ojf;GM2 b[$vM7oa #C_kwXK. +A睄d&e ,WP.g6!-:WºsBV أt  7n;Kil[@cijij"7lE?>qv2Wa q+I2/m  ')\X">D)H!TE}YzlMjpӖbO݋Re;Ul)2bhHnE8+ [ÑQEI@>8?JIy"!ځA땈`{eʻ`A(c;Y[6>EA>IϞ^_}5i6EmD.qƱ)i1N{W[āQA4"w+Rw$<&\@"RO+zZ_s*RJK!}8_:jMOņJLNy_Gx^ :}^,r3,VVX.X %J|5Zm{Kc k 2PJ;~KS(Wia"1-R|+7m VƋOe~/*<7*ۈ3Hg]$2[q>|N9'̯p7QՎ%;K[nb[C][2O6pseitB`C\ -!BZֺ \ wL*@#Ne9iҾFQo]a/}XmLxJF]&Ť揼 }`UZ:3fPNH .3iSdi H+LiGzQ3@ˊmRgq z 9Pg)eZKކJ.~ ɍ—+qBd6z]r2ÍZbkdI*ZsO#4=u[%ԖIlܩIq;d)-TrJ Ih١. 0-+rE")'rQ$<řuQ)RBo*P5SՇcl"٧5†(5d*F=UxqUۊy^_+Eܖ]x <] 5:( bnڡ#k{s|h6F)00֍r,:*n D*/=a9݂w\:!:4Zt=YDW5)%l7Єd$Qu!3WuI31:͓eN9_}~/1)ڧ (?v-3o7w^j1M(δq-[>`0Lj|a+U]@jg۪tx82!l9ZḞ#RPőSgK͙0 '_ mv:$wX8 z]=;߶PTw(fƘׇX<B76 U"VW~u*_9J(DjyMJi(XA F&;jL[ 5 5>g>;j3T t9۞YPqՍpY` "YB( KP`B):]Ƹ,PN ʃv~yc$Z6JV;U# _^w+uRp=CG4É|hcGs`Y`Mÿ@B ڥ"e"1l7JVC6l jpFg@\rde=l_TpϚWc7+a{ '\1ෝ;J̏6T$W@{qؽt%85N7Tendstream endobj 632 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /FlateDecode /Height 106 /Width 75 /Length 2774 >> stream xOUET *hԅ c Bِ%qcH7e+q5 Lmʲhtۤ ~ꓓsמ'd93|3 ;;eQFeQF]f~k#hkkkٳ]]]}}}/\MQ@wE~>72\e}]:․8pĩS_>|cǎ!Aȑ?GFF& d7?6_eR#j'l-S\e/|2$mf[[i4+W/^[SG&'|oЫTXZZjii!Ld.,,?%aas~>o>+Q[A U.SYTThpoD罽N|=rSSS$?S`HSepp4:tǏ,.oaSvZ[[-&P6t@Ɔ? 
qVH#84r ͛ʦ* 046brM۷a<rd*a/Gb] B :FDA[hIx PO3(ןz}ѡ!Z' Luc,$^|s-GJ>k f,!3%]A*^S[̉nTIt $N@èmM\S U+WGM ~ezyk1Tvܫہ}0ْR.ˉqL-@b]\dя%i Rh2xZwllX`@syު?;11Z#~ ]s ϓqa#Eʬ)z{6`-a$%JY~|Kꜰ^^.({QF[juE&gz>In"9"GKXx&wK(۷oGɦtp{kw L9kV6t.H;bCCC^(#?/Q޳<{B;q.+ dKnJ]j-cJw[h^RBKE#$*f`.J!8Cĵ X$/ip_2Q m2ʰ0u\:Q^0%Ek[N#R٦ebµb3}5(հ:!S0uL0mz=똺/X]] wUu3===Q NdG3(n=<(}bbv05Q2.⥶SOqFC㦼a] :"֟+ZΝ;^'%#g$56z] a 3.R[;Q333qA isRW8(2(2=_Fendstream endobj 633 0 obj << /Filter /FlateDecode /Length1 1044 /Length2 4815 /Length3 532 /Length 5516 >> stream xe\T{EZRf@Azn:`aAB:$D@tH S:A$<ܽwU̧`"(>A~A)P_@@] `2 JJ U6#b*!\0GS q  Z# #laP7?P@}; 'lQ@ NClo'% i;{D mC?z8;kC\HSᇸpq@A@- g1Oq(8lP&* Ӆlgw/;nO 2UU6L9u!08Dbߌ `+ ^Y a;Dŀ$M==Xap;(Ec %~@{菁JA?lPOHo$CbQ /*eG$oB@= 5<!cb A/8M8V_$- ~6/F ^Ͽ!v^Q][_WTD}Sv [G()""m=H({= {̠P4Ԗh;V:qrEXJv nE8ou/pw>p.19O"]uWGu;-' &{ZKlGd+6EqtrM߶av72u9 V'YMeMCMTS CK8Y_FOy'N?'EzPfd$\ZNl#FP7Ƿg$EV +r{@fyU^ՇwjʘO3jōt _v-$ #H~ I-Y˦)yqiS|]yYpU ) Rܴ\v ׹AXW#3>B1ĢSL}(wkv{DT{A.JC("I}q]^|.=SBF6"eqΔǥ##o #*i_`TZJ}"|ίxTw}%p<66z5+7ڦ*Dҳ_{\:dDݛHFDÚ#{WiKxs`fMazNP-$Lg'j>G- |B+`USadLut>T[ԉff8)t&NXm"\Cc)$>"Ȗi6? OI)zp|XY2 ;.I4UCܾ&wdԙ$OF|vzu%`b~k{5:bNUz=-=*c{憍i+/`_>XE~ o{?cvm泠xq;Q;#C]Q3v+S~ #EME;5i@a9 \CylxkZ-cѭIU J2|Lw_S6p/)>AZx{hXɧVrIޡb/bSKC!EܚOY_8hoL-:`!A,xPTx>rKv*0!ܮ<ŭ_'쁆tfm0^JPGߧt.{m^7b#!+>[G 5M/D"U'gŇQ=Q^ {^yI3-/(:lF{#Uq.-YaOQP=,::\xtcW[m3 Zyl.2[3wWYrBȲu"훜32^)|;ɓWjvjY:})CַgQh0.#,6 xUfӧ3ܲ>\"8l#Ziܲ™޸аd,WΫHp '>J}D/| c+ZorZM^_v_>^VZIZ?甿Vǒbb9Q۬"x{( =#YAxO*S[KLD˼~6m_Tk!4W\L'Bm>[?u;"rJ3֞BpĘvZƐ"5,LV1Κ&vqTF/՞:zF&&+R~]M_87|IA{r$菍._@k}'/ A>?G%o'\fD9Wu^'l-_̄Q|O;%}\bpi`qubl* ?%!0 ̚R. 2WuMI,ƞ\a9ㄥWNgd77FMr'? 5ZQ*ܣ-+oĿ-O)pgjVZ@*[G A򘲛6ySep 0%*BRy;4?4$MU$ęyqgxJ/pM3FT,J#}/g=³olIe3\Jn:eҔ -Fɤڎ^je+VWͧ߱yVչ6;BcmRy/tbCGl>.dBBh ,Q*.M7yVDCL:PS6MuweQcS'W`}LSL.NB׏]CKPׁ,oXsLWvlG7$ 5?SWJ2=$H4?_aj2{@!\;[W'0>'FsA%7ėR2MǕyCDTIsptgd)3Cnāz@ ,\f+G{3M >3RrSW/_ qLH6CVG&Ng+i?^coH<`@~˶|N*K@razw/=Y=bq;{߈HDBZ," Vk<'5Tm6:-/qe%Ya5ʶ2O+E_RdƱڈN'HB g1K B)z6SPk2(/_CưfH~q_WWh WSQCR>hoz Hf-ӕ)K=8NY! vp2=ІqV";|p˒{a7v $9FكNti,`"d#u_Łs|ã2LZӞVCFA`a[U7.YNwqM2'?e\U~PFgf =׌ᖝMAhf ȃ)DSxLKyjњRB_=[ˑ\/*;/o uy)GT5icIUPCq}N{ftn Ow̤'*47zn<\: # 9`~]6dtY>8UO0B!!SwýX"-U׍=(2Tg2%Mq^)MF"ӷI" *!]eGi"P\Ug='3lsntoqh`^5?=SacBdݐ?gJ[543OΆT+yG蝱d#fc;3jm3h} 4ʻ_U{)"Ƌ}U] 'ɆfFք|'u/^-ןT8V$\R)smѲb;\.H.% |lwϕ(9ӁNXSۂϏV ?q[>^'pnء >ߪ u Rּf!dڅq䎸/rI$(bĺI-haJ6NjESfd7L\uѻRbֈ\!M[nG;k檬{d@ *Yփ K- f!A%?uDU v]%,=X"*}"yӷBx l$ A:= Bendstream endobj 634 0 obj << /BBox [ 0.00000000 0.00000000 864.00000000 504.00000000 ] /FormType 1 /Matrix [ 1.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 ] /PTEX.FileName (./kernlab-008.pdf) /PTEX.InfoDict 530 0 R /PTEX.PageNumber 1 /Resources << /ExtGState << >> /Font << /F1 531 0 R /F2 532 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 1930 /Filter /FlateDecode >> stream xYM$ c|XE(Qprrm`k{IU3=k{fK,Qz$ˏG_>݊\J?.Zϟ_G=}{}׏.>]79xVܰ/s )kO|xէʷzQ={#'ՊCl"ԠZZ-clR*E; QQG)F8Er-;,:jV9cQ1cAT9L+*s)h25-(R|Ep4I1jĤ GꎪAOCd,; +c0QDm̜9F/}U˝b9T L9.-Q${JJK i *k嚬(j-#v_Mٱ5/ZmJр9Z$5s0f≔ q᛺8mI`@nhd\*&XHqAjH=:ē! 5go(q1%V*'L%C"Vita\3 #9*W bscǻvm! 
[binary stream data of kernlab/inst/doc/kernlab.pdf omitted]

kernlab/inst/doc/kernlab.Rnw (tar header and document preamble lost in extraction; the vignette source resumes below)

<<preliminaries, echo=FALSE>>=
library(kernlab)
options(width = 70)
@

\title{\pkg{kernlab} -- An \proglang{S4} Package for Kernel Methods in \proglang{R}}
\Plaintitle{kernlab - An S4 Package for Kernel Methods in R}

\author{Alexandros Karatzoglou\\Technische Universit\"at Wien
   \And Alex Smola\\Australian National University, NICTA
   \And Kurt Hornik\\Wirtschaftsuniversit\"at Wien }
\Plainauthor{Alexandros Karatzoglou, Alex Smola, Kurt Hornik}

\Abstract{
  \pkg{kernlab} is an extensible package for kernel-based machine learning methods in \proglang{R}. It takes advantage of \proglang{R}'s new \proglang{S4} object model and provides a framework for creating and using kernel-based algorithms. The package contains dot product primitives (kernels), implementations of support vector machines and the relevance vector machine, Gaussian processes, a ranking algorithm, kernel PCA, kernel CCA, kernel feature analysis, online kernel methods and a spectral clustering algorithm. Moreover it provides a general purpose quadratic programming solver, and an incomplete Cholesky decomposition method.
}

\Keywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, \proglang{S4}, \proglang{R}}
\Plainkeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R}

\begin{document}

\section{Introduction}

Machine learning is all about extracting structure from data, but it is often difficult to solve problems like classification, regression and clustering in the space in which the underlying observations have been made.

Kernel-based learning methods use an implicit mapping of the input data into a high dimensional feature space defined by a kernel function, i.e., a function returning the inner product $ \langle \Phi(x),\Phi(y) \rangle$ between the images of two data points $x, y$ in the feature space. The learning then takes place in the feature space, provided the learning algorithm can be entirely rewritten so that the data points only appear inside dot products with other points. This is often referred to as the ``kernel trick'' \citep{kernlab:Schoelkopf+Smola:2002}. More precisely, if a projection $\Phi: X \rightarrow H$ is used, the dot product $\langle\Phi(x),\Phi(y)\rangle$ can be represented by a kernel function~$k$
\begin{equation} \label{eq:kernel}
k(x,y)= \langle \Phi(x),\Phi(y) \rangle,
\end{equation}
which is computationally simpler than explicitly projecting $x$ and $y$ into the feature space~$H$.

One interesting property of kernel-based systems is that, once a valid kernel function has been selected, one can practically work in spaces of any dimension without paying any computational cost, since the feature mapping is never effectively performed. In fact, one does not even need to know which features are being used.

Another advantage is that one can design and use a kernel for a particular problem that could be applied directly to the data without the need for a feature extraction process. This is particularly important in problems where a lot of the structure of the data is lost by the feature extraction process (e.g., text processing). The inherent modularity of kernel-based learning methods allows one to use any valid kernel on a kernel-based algorithm.

\subsection{Software review}

The most prominent kernel-based learning algorithm is without doubt the support vector machine (SVM), so the existence of many support vector machine packages comes as little surprise. Most of the existing SVM software is written in \proglang{C} or \proglang{C++}, e.g.\ the award winning \pkg{libsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/libsvm/}} \citep{kernlab:Chang+Lin:2001}, \pkg{SVMlight}\footnote{\url{http://svmlight.joachims.org}} \citep{kernlab:joachim:1999}, \pkg{SVMTorch}\footnote{\url{http://www.torch.ch}}, Royal Holloway Support Vector Machines\footnote{\url{http://svm.dcs.rhbnc.ac.uk}}, \pkg{mySVM}\footnote{\url{http://www-ai.cs.uni-dortmund.de/SOFTWARE/MYSVM/index.eng.html}}, and \pkg{M-SVM}\footnote{\url{http://www.loria.fr/~guermeur/}}, with many packages providing interfaces to \proglang{MATLAB} (such as \pkg{libsvm}), and even some native \proglang{MATLAB} toolboxes\footnote{ \url{http://www.isis.ecs.soton.ac.uk/resources/svminfo/}}\,\footnote{ \url{http://asi.insa-rouen.fr/~arakotom/toolbox/index}}\,\footnote{ \url{http://www.cis.tugraz.at/igi/aschwaig/software.html}}.
Putting SVM specific software aside and considering the abundance of other kernel-based algorithms published nowadays, there is little software available implementing a wider range of kernel methods. Some exceptions are the \pkg{Spider}\footnote{\url{http://www.kyb.tuebingen.mpg.de/bs/people/spider/}} software, which provides a \proglang{MATLAB} interface to various \proglang{C}/\proglang{C++} SVM libraries and \proglang{MATLAB} implementations of various kernel-based algorithms, \pkg{Torch}\footnote{\url{http://www.torch.ch}}, which also includes more traditional machine learning algorithms, and the occasional \proglang{MATLAB} or \proglang{C} program found on a personal web page where an author includes code from a published paper.

\subsection[R software]{\proglang{R} software}

The \proglang{R} package \pkg{e1071} offers an interface to the award winning \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, a very efficient SVM implementation. \pkg{libsvm} provides a robust and fast SVM implementation and produces state of the art results on most classification and regression problems \citep{kernlab:Meyer+Leisch+Hornik:2003}. The \proglang{R} interface provided in \pkg{e1071} adds all standard \proglang{R} functionality like object orientation and formula interfaces to \pkg{libsvm}. Another SVM related \proglang{R} package which was recently made available is \pkg{klaR} \citep{kernlab:Roever:2004}, which includes an interface to \pkg{SVMlight}, a popular SVM implementation, along with other classification tools like Regularized Discriminant Analysis.

However, most of the \pkg{libsvm} and \pkg{klaR} SVM code is in \proglang{C++}. Therefore, if one would like to extend or enhance the code with e.g.\ new kernels or different optimizers, one would have to modify the core \proglang{C++} code.

\section[kernlab]{\pkg{kernlab}}

\pkg{kernlab} aims to provide the \proglang{R} user with basic kernel functionality (e.g., computing a kernel matrix using a particular kernel), along with some utility functions commonly used in kernel-based methods, like a quadratic programming solver, and modern kernel-based algorithms based on the functionality that the package provides. Taking advantage of the inherent modularity of kernel-based methods, \pkg{kernlab} aims to allow the user to switch between kernels on an existing algorithm and even create and use their own kernel functions for the kernel methods provided in the package.

\subsection[S4 objects]{\proglang{S4} objects}

\pkg{kernlab} uses \proglang{R}'s new object model described in ``Programming with Data'' \citep{kernlab:Chambers:1998}, which is known as the \proglang{S4} class system and is implemented in the \pkg{methods} package.

In contrast with the older \proglang{S3} model for objects in \proglang{R}, classes, slots, and method relationships must be declared explicitly when using the \proglang{S4} system. The number and types of slots in an instance of a class have to be established at the time the class is defined. The objects from the class are validated against this definition and have to comply with it at any time. \proglang{S4} also requires formal declarations of methods, unlike the informal system of using function names to identify a certain method in \proglang{S3}.

An \proglang{S4} method is declared by a call to \code{setMethod} along with the name and a ``signature'' of the arguments. The signature is used to identify the classes of one or more arguments of the method. Generic functions can be declared using the \code{setGeneric} function.
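For illustration, a minimal sketch of such declarations is shown below. The accessor name \code{clustcenters} is hypothetical and only demonstrates the syntax; it dispatches on the \code{specc} class defined in the example that follows.

\begin{smallexample}
## declare a generic function ...
setGeneric("clustcenters",
           function(object) standardGeneric("clustcenters"))

## ... and an S4 method implementing it for the "specc" class,
## simply returning the contents of the centers slot
setMethod("clustcenters", signature(object = "specc"),
          function(object) object@centers)
\end{smallexample}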
Although such formal declarations require package authors to be more disciplined than when using the informal \proglang{S3} classes, they provide assurance that each object in a class has the required slots and that the names and classes of data in the slots are consistent.

An example of a class used in \pkg{kernlab} is shown below. Typically, in a return object we want to include information on the result of the method along with additional information and parameters. Usually \pkg{kernlab}'s classes include slots for the kernel function used and the results and additional useful information.

\begin{smallexample}
setClass("specc",
         representation("vector",   # the vector containing the cluster
         centers="matrix",          # the cluster centers
         size="vector",             # size of each cluster
         kernelf="function",        # kernel function used
         withinss = "vector"),      # within cluster sum of squares
         prototype = structure(.Data = vector(),
                               centers = matrix(),
                               size = matrix(),
                               kernelf = ls,
                               withinss = vector()))
\end{smallexample}

Accessor and assignment functions are defined and used to access the content of each slot, which can also be accessed with the \verb|@| operator.

\subsection{Namespace}

Namespaces were introduced in \proglang{R} 1.7.0 and provide a means for packages to control the way global variables and methods are being made available. Due to the number of assignment and accessor functions involved, a namespace is used to control the methods which are being made visible outside the package. Since \proglang{S4} methods are being used, the \pkg{kernlab} namespace also imports methods and variables from the \pkg{methods} package.

\subsection{Data}

The \pkg{kernlab} package also includes data sets which will be used to illustrate the methods included in the package. The \code{spam} data set \citep{kernlab:Hastie:2001}, collected at Hewlett-Packard Labs, contains data on 2788 and 1813 e-mails classified as non-spam and spam, respectively. The 57 variables of each data vector indicate the frequency of certain words and characters in the e-mail.

Another data set included in \pkg{kernlab}, the \code{income} data set \citep{kernlab:Hastie:2001}, is taken from a marketing survey in the San Francisco Bay area concerning the income of shopping mall customers. It consists of 14 demographic attributes (nominal and ordinal variables), including the income, and 8993 observations.

The \code{ticdata} data set \citep{kernlab:Putten:2000} was used in the 2000 Coil Challenge and contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why?

The \code{promotergene} data set contains E.~coli promoter gene sequences (DNA) with 106 observations and 58 variables, available at the UCI Machine Learning repository. Promoters have a region where a protein (RNA polymerase) must make contact, and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters.

The \code{spirals} data set was created by the \code{mlbench.spirals} function in the \pkg{mlbench} package \citep{kernlab:Leisch+Dimitriadou}. This two-dimensional data set with 300 data points consists of two spirals where Gaussian noise is added to each data point.
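For orientation, each of these data sets ships with the package and can be loaded and inspected in the usual way; a minimal sketch (the class label of \code{spam} is stored in the last column, as used in the examples later in this document):

\begin{verbatim}
data(spam)
dim(spam)          # 4601 e-mails (2788 non-spam, 1813 spam)
table(spam[, 58])  # class distribution
\end{verbatim}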
\subsection{Kernels}

A kernel function~$k$ calculates the inner product of two vectors $x$, $x'$ in a given feature mapping $\Phi: X \rightarrow H$. The notion of a kernel is obviously central in the making of any kernel-based algorithm and consequently also in any software package containing kernel-based methods.

Kernels in \pkg{kernlab} are \proglang{S4} objects of class \code{kernel} extending the \code{function} class with one additional slot containing a list with the kernel hyper-parameters. Package \pkg{kernlab} includes 7 different kernel classes which all contain the class \code{kernel} and are used to implement the existing kernels. These classes are used in the function dispatch mechanism of the kernel utility functions described below.

Existing kernel functions are initialized by ``creator'' functions. All kernel functions take two feature vectors as parameters and return the scalar dot product of the vectors. An example of the functionality of a kernel in \pkg{kernlab}:

<<rbf1>>=
## create a RBF kernel function with sigma hyper-parameter 0.05
rbf <- rbfdot(sigma = 0.05)
rbf

## create two random feature vectors
x <- rnorm(10)
y <- rnorm(10)

## compute dot product between x,y
rbf(x, y)
@

The package includes implementations of the following kernels:

\begin{itemize}
\item the linear \code{vanilladot} kernel implements the simplest of all kernel functions
\begin{equation}
k(x,x') = \langle x, x' \rangle
\end{equation}
which is useful especially when dealing with large sparse data vectors~$x$ as is usually the case in text categorization.

\item the Gaussian radial basis function \code{rbfdot}
\begin{equation}
k(x,x') = \exp(-\sigma \|x - x'\|^2)
\end{equation}
which is a general purpose kernel and is typically used when no further prior knowledge is available about the data.

\item the polynomial kernel \code{polydot}
\begin{equation}
k(x, x') = \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)^\mathrm{degree},
\end{equation}
which is used in classification of images.

\item the hyperbolic tangent kernel \code{tanhdot}
\begin{equation}
k(x, x') = \tanh \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)
\end{equation}
which is mainly used as a proxy for neural networks.

\item the Bessel function of the first kind kernel \code{besseldot}
\begin{equation}
k(x, x') = \frac{\mathrm{Bessel}_{(\nu+1)}^n(\sigma \|x - x'\|)} {(\|x-x'\|)^{-n(\nu+1)}},
\end{equation}
which is a general purpose kernel, typically used when no further prior knowledge is available, and mainly popular in the Gaussian process community.

\item the Laplace radial basis kernel \code{laplacedot}
\begin{equation}
k(x, x') = \exp(-\sigma \|x - x'\|)
\end{equation}
which is a general purpose kernel and is typically used when no further prior knowledge is available.

\item the ANOVA radial basis kernel \code{anovadot}, which performs well in multidimensional regression problems,
\begin{equation}
k(x, x') = \left(\sum_{k=1}^{n}\exp(-\sigma(x^k-{x'}^k)^2)\right)^{d}
\end{equation}
where $x^k$ is the $k$th component of $x$.
\end{itemize}

\subsection{Kernel utility methods}

The package also includes methods for computing commonly used kernel expressions (e.g., the Gram matrix). These methods are written in such a way that they take functions (i.e., kernels) and matrices (i.e., vectors of patterns) as arguments. These can be either the kernel functions already included in \pkg{kernlab} or any other function implementing a valid dot product (taking two vector arguments and returning a scalar).
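For instance, a user-defined kernel is just such a function. In the following sketch the inhomogeneous quadratic kernel \code{mykernel} is hypothetical; tagging the function with the \code{kernel} class lets the \proglang{S4} utility methods described below dispatch on it:

\begin{verbatim}
## a user-defined kernel: any function taking two vectors
## and returning their (generalized) dot product
mykernel <- function(x, y) {
  (sum(x * y) + 1)^2
}
class(mykernel) <- "kernel"

x <- matrix(rnorm(30), 3, 10)
K <- kernelMatrix(mykernel, x)   # 3 x 3 kernel matrix
\end{verbatim}

The same function can later be supplied through the \code{kernel} argument of the kernel methods (e.g., \code{ksvm}) presented in the remainder of the paper.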
In case one of the already implemented kernels is used, the function calls a vectorized implementation of the corresponding function. Moreover, in the case of symmetric matrices (e.g., the dot product matrix of a Support Vector Machine) they only require one argument rather than having to pass the same matrix twice (for rows and columns). The computations for the kernels already available in the package are vectorized whenever possible, which guarantees good performance and acceptable memory requirements.

Users can define their own kernel by creating a function which takes two vectors as arguments (the data points) and returns a scalar (the dot product). This function can then be passed as an argument to the kernel utility methods. For a user defined kernel the dispatch mechanism calls a generic method implementation which calculates the expression by passing the kernel function through a pair of \code{for} loops.

The kernel methods included are:

\begin{description}
\item[\code{kernelMatrix}] This is the most commonly used function. It computes $k(x, x')$, i.e., it computes the matrix $K$ where $K_{ij} = k(x_i, x_j)$ and each data point $x_i$ is a \emph{row} of the matrix $x$. In particular,
\begin{verbatim}
K <- kernelMatrix(kernel, x)
\end{verbatim}
computes the matrix $K_{ij} = k(x_i, x_j)$ where the $x_i$ are the rows of $x$, and
\begin{verbatim}
K <- kernelMatrix(kernel, x1, x2)
\end{verbatim}
computes the matrix $K_{ij} = k(x1_i, x2_j)$.

\item[\code{kernelFast}] This method is different from \code{kernelMatrix} only for \code{rbfdot}, \code{besseldot}, and the \code{laplacedot} kernel, which are all RBF kernels. It is identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input. It is mainly used in kernel algorithms where columns of the kernel matrix are computed per invocation. In these cases, evaluating the norm of each column entry over and over again, as is done on a \code{kernelMatrix} invocation on an RBF kernel, would cause significant computational overhead. Its invocation is via
\begin{verbatim}
K = kernelFast(kernel, x1, x2, a)
\end{verbatim}
Here $a$ is a vector containing the squared norms of $x1$.

\item[\code{kernelMult}] is a convenient way of computing kernel expansions. It returns the vector $f = (f(x_1), \dots, f(x_m))$ where
\begin{equation}
f(x_i) = \sum_{j=1}^{m} k(x_i, x_j) \alpha_j, \mbox{~hence~} f = K \alpha.
\end{equation}
The need for such a function arises from the fact that $K$ may sometimes be larger than the memory available. Therefore, it is convenient to compute $K$ only in stripes and discard each stripe after the corresponding part of $K \alpha$ has been computed. The parameter \code{blocksize} determines the number of rows in the stripes. In particular,
\begin{verbatim}
f <- kernelMult(kernel, x, alpha)
\end{verbatim}
computes $f_i = \sum_{j=1}^m k(x_i, x_j) \alpha_j$ and
\begin{verbatim}
f <- kernelMult(kernel, x1, x2, alpha)
\end{verbatim}
computes $f_i = \sum_{j=1}^m k(x1_i, x2_j) \alpha_j$.

\item[\code{kernelPol}] is a method very similar to \code{kernelMatrix} with the only difference that rather than computing $K_{ij} = k(x_i, x_j)$ it computes $K_{ij} = y_i y_j k(x_i, x_j)$. This means that
\begin{verbatim}
K <- kernelPol(kernel, x, y)
\end{verbatim}
computes the matrix $K_{ij} = y_i y_j k(x_i, x_j)$ where the $x_i$ are the rows of $x$ and the $y_i$ are elements of the vector~$y$. Moreover,
\begin{verbatim}
K <- kernelPol(kernel, x1, x2, y1, y2)
\end{verbatim}
computes the matrix $K_{ij} = y1_i y2_j k(x1_i, x2_j)$. Both \code{x1} and \code{x2} may be matrices and \code{y1} and \code{y2} vectors.
\end{description}
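To make the blockwise evaluation of \code{kernelMult} concrete, the following sketch (with ad hoc random data, following the call forms shown above) checks that it agrees with explicitly forming the kernel matrix first:

\begin{verbatim}
rbf <- rbfdot(sigma = 0.05)
x <- matrix(rnorm(50), 5, 10)
alpha <- rnorm(5)
f1 <- kernelMult(rbf, x, alpha)        # blockwise K alpha
f2 <- kernelMatrix(rbf, x) %*% alpha   # explicit K, then product
max(abs(f1 - f2))                      # practically zero
\end{verbatim}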
An example using these functions:

<<kernelMatrix>>=
## create a polynomial kernel function of degree 2
poly <- polydot(degree=2)

## create artificial data set
x <- matrix(rnorm(60), 6, 10)
y <- matrix(rnorm(40), 4, 10)

## compute kernel matrix
kx <- kernelMatrix(poly, x)
kxy <- kernelMatrix(poly, x, y)
@

\section{Kernel methods}

Providing a solid base for creating kernel-based methods is part of what we are trying to achieve with this package, the other part being to provide a wider range of kernel-based methods in \proglang{R}. In the rest of the paper we present the kernel-based methods available in \pkg{kernlab}. All the methods in \pkg{kernlab} can be used with any of the kernels included in the package as well as with any valid user-defined kernel. User defined kernel functions can be passed to existing kernel methods in the \code{kernel} argument, as sketched in the previous section.

\subsection{Support vector machine}

Support vector machines \citep{kernlab:Vapnik:1998} have gained prominence in the field of machine learning and pattern classification and regression. The solutions to classification and regression problems sought by kernel-based algorithms such as the SVM are linear functions in the feature space:
\begin{equation}
f(x) = w^\top \Phi(x)
\end{equation}
for some weight vector $w \in F$. The kernel trick can be exploited in this setting whenever the weight vector~$w$ can be expressed as a linear combination of the training points, $w = \sum_{i=1}^{n} \alpha_i \Phi(x_i)$, implying that $f$ can be written as
\begin{equation}
f(x) = \sum_{i=1}^{n}\alpha_i k(x_i, x)
\end{equation}

A very important issue that arises is that of choosing a kernel~$k$ for a given learning task. Intuitively, we wish to choose a kernel that induces the ``right'' metric in the space. Support Vector Machines choose a function $f$ that is linear in the feature space by optimizing some criterion over the sample. In the case of the 2-norm Soft Margin classification the optimization problem takes the form:
\begin{eqnarray}
\nonumber \mathrm{minimize} && t(w,\xi) = \frac{1}{2}{\|w\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\
\mbox{subject to~} && y_i ( \langle x_i , w \rangle +b ) \geq 1- \xi_i \qquad (i=1,\dots,m)\\
\nonumber && \xi_i \ge 0 \qquad (i=1,\dots, m)
\end{eqnarray}
Based on similar methodology, SVMs deal with the problem of novelty detection (or one class classification) and regression.

\pkg{kernlab}'s implementation of support vector machines, \code{ksvm}, is based on the optimizers found in \pkg{bsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/bsvm}} \citep{kernlab:Hsu:2002} and \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, which includes a very efficient version of Sequential Minimal Optimization (SMO). SMO decomposes the SVM Quadratic Problem (QP) without using any numerical QP optimization steps. Instead, it chooses to solve the smallest possible optimization problem, involving two elements of $\alpha_i$, because they must obey one linear equality constraint. At every step, SMO chooses two $\alpha_i$ to jointly optimize and finds the optimal values for these $\alpha_i$ analytically, thus avoiding numerical QP optimization, and updates the SVM to reflect the new optimal values.
The SVM implementations available in \code{ksvm} include the C-SVM classification algorithm along with the $\nu$-SVM classification formulation, which is equivalent to the former but has a more natural ($\nu$) model parameter taking values in $[0,1]$ that is proportional to the fraction of support vectors found in the data set and the training error.

For classification problems which include more than two classes (multi-class), a one-against-one or pairwise classification method \citep{kernlab:Knerr:1990, kernlab:Kressel:1999} is used. This method constructs ${k \choose 2}$ classifiers where each one is trained on data from two classes. Prediction is done by voting where each classifier gives a prediction and the class which is predicted more often wins (``Max Wins''). This method has been shown to produce robust results when used with SVMs \citep{kernlab:Hsu2:2002}.

Furthermore, the \code{ksvm} implementation provides the ability to produce class probabilities as output instead of class labels. This is done by an improved implementation \citep{kernlab:Lin:2001} of Platt's posteriori probabilities \citep{kernlab:Platt:2000} where a sigmoid function
\begin{equation}
P(y=1\mid f) = \frac{1}{1+ e^{Af+B}}
\end{equation}
is fitted on the decision values~$f$ of the binary SVM classifiers; $A$ and $B$ are estimated by minimizing the negative log-likelihood function. To extend the class probabilities to the multi-class case, each binary classifier's class probability output is combined by the \code{couple} method, which implements methods for combining class probabilities proposed in \citep{kernlab:Wu:2003}.

In order to create a similar probability output for regression, following \cite{kernlab:Weng:2004}, we suppose that the SVM is trained on data from the model
\begin{equation}
y_i = f(x_i) + \delta_i
\end{equation}
where $f(x_i)$ is the underlying function and $\delta_i$ is independent and identically distributed random noise. Given test data $x$, the distribution of $y$ given $x$ allows one to draw probabilistic inferences about $y$, e.g.\ one can construct a predictive interval $\Phi = \Phi(x)$ such that $y \in \Phi$ with a certain probability. If $\hat{f}$ is the estimated (predicted) function of the SVM on new data, then $\eta = \eta(x) = y - \hat{f}(x)$ is the prediction error and $y \in \Phi$ is equivalent to $\eta \in \Phi$. Empirical observation shows that the distribution of the residuals $\eta$ can be modeled both by a Gaussian and a Laplacian distribution with zero mean. In this implementation the Laplacian with zero mean is used:
\begin{equation}
p(z) = \frac{1}{2\sigma}e^{-\frac{|z|}{\sigma}}
\end{equation}
Assuming that the $\eta_i$ are independent, the scale parameter $\sigma$ is estimated by maximizing the likelihood. The data for the estimation is produced by a three-fold cross-validation. For the Laplace distribution the maximum likelihood estimate is:
\begin{equation}
\sigma = \frac{\sum_{i=1}^m|\eta_i|}{m}
\end{equation}

Another approach for multi-class classification supported by the \code{ksvm} function is the one proposed in \cite{kernlab:Crammer:2000}.
This algorithm works by solving a single optimization problem including the data from all classes:
\begin{eqnarray}
\nonumber \mathrm{minimize} && t(w_n,\xi) = \frac{1}{2}\sum_{n=1}^k{\|w_n\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\
\mbox{subject to~} && \langle x_i , w_{y_i} \rangle - \langle x_i , w_{n} \rangle \geq b_i^n - \xi_i \qquad (i=1,\dots,m) \\
\mbox{where} && b_i^n = 1 - \delta_{y_i,n}
\end{eqnarray}
where the decision function is
\begin{equation}
\mathrm{argmax}_{n=1,\dots,k} \langle x_i , w_{n} \rangle
\end{equation}
This optimization problem is solved by a decomposition method proposed in \cite{kernlab:Hsu:2002} where optimal working sets are found (that is, sets of $\alpha_i$ values which have a high probability of being non-zero). The QP sub-problems are then solved by a modified version of the \pkg{TRON}\footnote{\url{http://www-unix.mcs.anl.gov/~more/tron/}} \citep{kernlab:more:1999} optimization software.

One-class classification or novelty detection \citep{kernlab:Williamson:1999, kernlab:Tax:1999}, where essentially an SVM detects outliers in a data set, is another algorithm supported by \code{ksvm}. SVM novelty detection works by creating a spherical decision boundary around a set of data points by a set of support vectors describing the sphere's boundary. The $\nu$ parameter is used to control the volume of the sphere and consequently the number of outliers found. Again, the value of $\nu$ represents the fraction of outliers found. Furthermore, $\epsilon$-SVM \citep{kernlab:Vapnik2:1995} and $\nu$-SVM \citep{kernlab:Smola1:2000} regression are also available.

The problem of model selection is partially addressed by an empirical observation for the popular Gaussian RBF kernel \citep{kernlab:Caputo:2002}, where the optimal values of the sigma hyper-parameter are shown to lie between the 0.1 and 0.9 quantile of the $\|x - x'\|$ statistics. The \code{sigest} function uses a sample of the training set to estimate the quantiles and returns a vector containing the values of the quantiles. Pretty much any value within this interval leads to good performance. An example for the \code{ksvm} function is shown below.

<<ksvm>>=
## simple example using the promotergene data set
data(promotergene)

## create test and training set
tindex <- sample(1:dim(promotergene)[1],5)
genetrain <- promotergene[-tindex, ]
genetest <- promotergene[tindex,]

## train a support vector machine
gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE)
gene
predict(gene, genetest)
predict(gene, genetest, type="probabilities")
@

\begin{figure}
\centering
<<fig=TRUE>>=
set.seed(123)
x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2))
y <- matrix(c(rep(1,60),rep(-1,60)))

svp <- ksvm(x,y,type="C-svc")
plot(svp,data=x)
@
\caption{A contour plot of the SVM decision values for a toy binary classification problem using the \code{plot} function}
\label{fig:ksvm Plot}
\end{figure}
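The quantile heuristic can also be invoked directly. A minimal sketch, assuming the formula interface of \code{sigest} and reusing the training set from above (\code{kpar = "automatic"} performs a similar estimation internally):

\begin{verbatim}
## the three values are the 0.1, 0.5 and 0.9 quantile estimates;
## any sigma within this range should perform reasonably
srange <- sigest(Class ~ ., data = genetrain)
srange
gene2 <- ksvm(Class ~ ., data = genetrain, kernel = "rbfdot",
              kpar = list(sigma = srange[2]), C = 60)
\end{verbatim}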
\subsection{Relevance vector machine}

The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM, making predictions based on a function of the form
\begin{equation}
y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0
\end{equation}
where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function. It adopts a Bayesian approach to learning, by introducing a prior over the weights $\alpha$
\begin{equation}
p(\alpha, \beta) = \prod_{i=1}^m N(\beta_i \mid 0 , a_i^{-1}) \mathrm{Gamma}(\beta_i\mid \beta_\beta , \alpha_\beta)
\end{equation}
governed by a set of hyper-parameters $\beta$, one associated with each weight, whose most probable values are iteratively estimated from the data.

Sparsity is achieved because in practice the posterior distribution of many of the weights is sharply peaked around zero. Furthermore, unlike the SVM classifier, the non-zero weights in the RVM are not associated with examples close to the decision boundary, but rather appear to represent ``prototypical'' examples. These examples are termed \emph{relevance vectors}.

\pkg{kernlab} currently has an implementation of the RVM based on a type~II maximum likelihood method which can be used for regression. The function returns an \proglang{S4} object containing the model parameters along with indexes for the relevance vectors and the kernel function and hyper-parameters used.

<<rvm>>=
x <- seq(-20, 20, 0.5)
y <- sin(x)/x + rnorm(81, sd = 0.03)
y[41] <- 1
@
<<rvm2>>=
rvmm <- rvm(x, y,kernel="rbfdot",kpar=list(sigma=0.1))
rvmm
ytest <- predict(rvmm, x)
@

\begin{figure}
\centering
<<fig=TRUE>>=
plot(x, y, cex=0.5)
lines(x, ytest, col = "red")
points(x[RVindex(rvmm)],y[RVindex(rvmm)],pch=21)
@
\caption{Relevance vector regression on data points created by the $\mathrm{sinc}(x)$ function; relevance vectors are shown circled.}
\label{fig:RVM sigmoid}
\end{figure}

\subsection{Gaussian processes}

Gaussian processes \citep{kernlab:Williams:1995} are based on the ``prior'' assumption that adjacent observations should convey information about each other. In particular, it is assumed that the observed variables are normal, and that the coupling between them takes place by means of the covariance matrix of a normal distribution. Using the kernel matrix as the covariance matrix is a convenient way of extending Bayesian modeling of linear estimators to nonlinear situations. Furthermore it represents the counterpart of the ``kernel trick'' in methods minimizing the regularized risk.

For regression estimation we assume that rather than observing $t(x_i)$ we observe $y_i = t(x_i) + \xi_i$ where $\xi_i$ is assumed to be independent Gaussian distributed noise with zero mean. The posterior distribution is given by
\begin{equation}
p(\mathbf{y}\mid \mathbf{t}) = \left[ \prod_ip(y_i - t(x_i)) \right] \frac{1}{\sqrt{(2\pi)^m \det(K)}} \exp \left(-\frac{1}{2}\mathbf{t}^T K^{-1} \mathbf{t} \right)
\end{equation}
and after substituting $\mathbf{t} = K\mathbf{\alpha}$ and taking logarithms
\begin{equation}
\ln{p(\mathbf{\alpha} \mid \mathbf{y})} = - \frac{1}{2\sigma^2}\| \mathbf{y} - K \mathbf{\alpha} \|^2 -\frac{1}{2}\mathbf{\alpha}^T K \mathbf{\alpha} +c
\end{equation}
and maximizing $\ln{p(\mathbf{\alpha} \mid \mathbf{y})}$ for $\mathbf{\alpha}$ to obtain the maximum a posteriori approximation yields
\begin{equation}
\mathbf{\alpha} = (K + \sigma^2\mathbf{1})^{-1} \mathbf{y}
\end{equation}
Knowing $\mathbf{\alpha}$ allows for prediction of $y$ at a new location $x$ through $y = K(x,x_i){\mathbf{\alpha}}$. In similar fashion Gaussian processes can be used for classification. \code{gausspr} is the function in \pkg{kernlab} implementing Gaussian processes for classification and regression.
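A minimal regression sketch, reusing the $\mathrm{sinc}$ toy data from the RVM example above (the kernel settings here are ad hoc assumptions):

\begin{verbatim}
gp <- gausspr(x, y, kernel = "rbfdot", kpar = list(sigma = 0.1))
yhat <- predict(gp, x)
lines(x, yhat, col = "blue")   # compare against the RVM fit
\end{verbatim}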
\subsection{Ranking}

The success of Google has vividly demonstrated the value of a good ranking algorithm in real world problems. \pkg{kernlab} includes a ranking algorithm based on work published in \citep{kernlab:Zhou:2003}. This algorithm exploits the geometric structure of the data, in contrast to the more naive approach which uses the Euclidean distances or inner products of the data. Since real world data are usually highly structured, this algorithm should perform better than a simpler approach based on a Euclidean distance measure.

First, a weighted network is defined on the data and an authoritative score is assigned to every point. The query points act as source nodes that continually pump their scores to the remaining points via the weighted network, and the remaining points further spread the score to their neighbors. The spreading process is repeated until convergence and the points are ranked according to the scores they received.

Suppose we are given a set of data points $X = \{x_1, \dots, x_{s}, x_{s+1}, \dots, x_{m}\}$ in $\mathbf{R}^n$ where the first $s$ points are the query points and the rest are the points to be ranked. The algorithm works by connecting the two nearest points iteratively until a connected graph $G = (X, E)$ is obtained, where $E$ is the set of edges. The affinity matrix $K$ is defined e.g.\ by $K_{ij} = \exp(-\sigma\|x_i - x_j \|^2)$ if there is an edge $e(i,j) \in E$ and $0$ for the rest and the diagonal elements. The matrix is normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$, and
\begin{equation}
f(t+1) = \alpha Lf(t) + (1 - \alpha)y
\end{equation}
is iterated until convergence, where $\alpha$ is a parameter in $[0,1)$. The points are then ranked according to their final scores $f_{i}(t_f)$.

\pkg{kernlab} includes an \proglang{S4} method implementing the ranking algorithm. The algorithm can be used both with an edge-graph, where the structure of the data is taken into account, and without, which is equivalent to ranking the data by their distance in the projected space.

\begin{figure}
\centering
<<ranking, fig=TRUE>>=
data(spirals)
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,]
ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
c<-1:86
op <- par(mfrow = c(1, 2),pty="s")
plot(ran)
plot(ran, cex=c[ranked[,3]]/40)
@
\caption{The points on the left are ranked according to their similarity to the upper most left point. Points with a higher rank appear bigger. Instead of ranking the points on simple Euclidean distance the structure of the data is recognized and all points on the upper structure are given a higher rank although further away in distance than points in the lower structure.}
\label{fig:Ranking}
\end{figure}

\subsection{Online learning with kernels}

The \code{onlearn} function in \pkg{kernlab} implements the online kernel algorithms for classification, novelty detection and regression described in \citep{kernlab:Kivinen:2004}. In batch learning, it is typically assumed that all the examples are immediately available and are drawn independently from some distribution $P$. One natural measure of quality for some $f$ in that case is the expected risk
\begin{equation}
R[f,P] := E_{(x,y)\sim P}[l(f(x),y)]
\end{equation}
Since usually $P$ is unknown, a standard approach is to instead minimize the empirical risk
\begin{equation}
R_{emp}[f] := \frac{1}{m}\sum_{t=1}^m l(f(x_t),y_t)
\end{equation}
Minimizing $R_{emp}[f]$ may lead to overfitting (complex functions that fit well on the training data but do not generalize to unseen data).
One way to avoid this is to penalize complex functions by instead minimizing the regularized risk
\begin{equation}
R_{reg}[f,S] := R_{reg,\lambda}[f,S] := R_{emp}[f] + \frac{\lambda}{2}\|f\|_{H}^2
\end{equation}
where $\lambda > 0$ and $\|f\|_{H} = {\langle f,f \rangle}_{H}^{\frac{1}{2}}$ does indeed measure the complexity of $f$ in a sensible way. The constant $\lambda$ needs to be chosen appropriately for each problem. Since in online learning one is interested in dealing with one example at a time, the definition of an instantaneous regularized risk on a single example is needed
\begin{equation}
R_{inst}[f,x,y] := R_{inst,\lambda}[f,x,y] := R_{reg,\lambda}[f,((x,y))]
\end{equation}

The implemented algorithms are classical stochastic gradient descent algorithms performing gradient descent on the instantaneous risk. The general form of the update rule is:
\begin{equation}
f_{t+1} = f_t - \eta_t \partial_f R_{inst,\lambda}[f,x_t,y_t]|_{f=f_t}
\end{equation}
where $f_t \in H$, $\partial_f$ is shorthand for $\partial / \partial f$ (the gradient with respect to $f$), and $\eta_t > 0$ is the learning rate. Due to the learning taking place in a \textit{reproducing kernel Hilbert space} $H$, the kernel $k$ used has the property $\langle f,k(x,\cdotp)\rangle_H = f(x)$ and therefore
\begin{equation}
\partial_f l(f(x_t),y_t) = l'(f(x_t),y_t)k(x_t,\cdotp)
\end{equation}
where $l'(z,y) := \partial_z l(z,y)$. Since $\partial_f\|f\|_H^2 = 2f$, the update becomes
\begin{equation}
f_{t+1} := (1 - \eta_t\lambda)f_t -\eta_t l'( f_t(x_t),y_t)k(x_t,\cdotp)
\end{equation}

The \code{onlearn} function implements the online learning algorithm for regression, classification and novelty detection. The online nature of the algorithm requires a different approach to the use of the function. An object is used to store the state of the algorithm at each iteration $t$; this object is passed to the function as an argument and is returned at each iteration $t+1$, containing the model parameter state at this step. An empty object of class \code{onlearn} is initialized using the \code{inlearn} function.

<<onlearn>>=
## create toy data set
x <- rbind(matrix(rnorm(90),,2),matrix(rnorm(90)+3,,2))
y <- matrix(c(rep(1,45),rep(-1,45)),,1)

## initialize onlearn object
on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),type="classification")
ind <- sample(1:90,90)
## learn one data point at the time
for(i in ind)
on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1)
sign(predict(on,x))
@

\subsection{Spectral clustering}

Spectral clustering \citep{kernlab:Ng:2001} is a recently emerged promising alternative to common clustering algorithms. In this method one uses the top eigenvectors of a matrix created by some similarity measure to cluster the data. Similarly to the ranking algorithm, an affinity matrix is created from the data as
\begin{equation}
K_{ij}=\exp(-\sigma\|x_i - x_j \|^2)
\end{equation}
and normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$. Then the top $k$ eigenvectors (where $k$ is the number of clusters to be found) of the affinity matrix are used to form an $n \times k$ matrix $Y$ where each row is normalized again to unit length. Treating each row of this matrix as a data point, \code{kmeans} is finally used to cluster the points.
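The embedding itself is easily written out in a few lines of \proglang{R}. The following sketch mirrors what the implementation does internally; the value of $\sigma$ is ad hoc, and the diagonal of the affinity matrix is set to zero as in the cited reference:

\begin{verbatim}
data(spirals)
K <- kernelMatrix(rbfdot(sigma = 100), spirals)
diag(K) <- 0                        # zero self-affinity
d <- 1/sqrt(rowSums(K))
L <- outer(d, d) * K                # D^{-1/2} K D^{-1/2}
Y <- eigen(L, symmetric = TRUE)$vectors[, 1:2]
Y <- Y / sqrt(rowSums(Y^2))         # renormalize each row
cl <- kmeans(Y, centers = 2)$cluster
\end{verbatim}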
\pkg{kernlab} includes an \proglang{S4} method called \code{specc} implementing this algorithm which can be used through a formula interface or a matrix interface. The \proglang{S4} object returned by the method extends the class ``vector'' and contains the assigned cluster for each point along with information on the centers, size and within-cluster sum of squares for each cluster. In case a Gaussian RBF kernel is being used, a model selection process can be used to determine the optimal value of the $\sigma$ hyper-parameter. For a good value of $\sigma$ the values of $Y$ tend to cluster tightly and it turns out that the within cluster sum of squares is a good indicator for the ``quality'' of the sigma parameter found. We then iterate through the sigma values to find an optimal value for $\sigma$.

\begin{figure}
\centering
<<fig=TRUE>>=
data(spirals)
sc <- specc(spirals, centers=2)
plot(spirals, pch=(23 - 2*sc))
@
\caption{Clustering the two spirals data set with \code{specc}}
\label{fig:Spectral Clustering}
\end{figure}

\subsection{Kernel principal components analysis}

Principal component analysis (PCA) is a powerful technique for extracting structure from possibly high-dimensional datasets. PCA is an orthogonal transformation of the coordinate system in which we describe the data. The new coordinates by which we represent the data are called principal components. Kernel PCA \citep{kernlab:Schoelkopf:1998} performs a nonlinear transformation of the coordinate system by finding principal components which are nonlinearly related to the input variables. Given a set of centered observations $x_k$, $k=1,\dots,M$, $x_k \in \mathbf{R}^N$, PCA diagonalizes the covariance matrix $C = \frac{1}{M}\sum_{j=1}^Mx_jx_{j}^T$ by solving the eigenvalue problem $\lambda\mathbf{v}=C\mathbf{v}$. The same computation can be done in a dot product space $F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$. Assuming that we deal with centered data and use the covariance matrix in $F$,
\begin{equation}
\hat{C}=\frac{1}{M}\sum_{j=1}^M \Phi(x_j)\Phi(x_j)^T
\end{equation}
the kernel principal components are then computed by taking the eigenvectors of the centered kernel matrix $K_{ij} = \langle \Phi(x_i),\Phi(x_j) \rangle$.

\code{kpca}, the function implementing KPCA in \pkg{kernlab}, can be used both with a formula and a matrix interface, and returns an \proglang{S4} object of class \code{kpca} containing the principal components and the corresponding eigenvalues, along with the projection of the training data on the new coordinate system. Furthermore, the \code{predict} function can be used to embed new data points into the new coordinate system, as sketched after the figure below.

\begin{figure}
\centering
<<kpca, fig=TRUE>>=
data(spam)
train <- sample(1:dim(spam)[1],400)
kpc <- kpca(~.,data=spam[train,-58],kernel="rbfdot",kpar=list(sigma=0.001),features=2)
kpcv <- pcv(kpc)
plot(rotated(kpc),col=as.integer(spam[train,58]),xlab="1st Principal Component",ylab="2nd Principal Component")
@
\caption{Projection of the spam data on two kernel principal components using an RBF kernel}
\label{fig:KPCA}
\end{figure}
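A minimal sketch of such an embedding, continuing the example from the figure by projecting the held-out spam observations on the two kernel principal components computed above:

\begin{verbatim}
test <- setdiff(1:dim(spam)[1], train)
testproj <- predict(kpc, spam[test, -58])
\end{verbatim}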
An algorithm can be derived using the same approach in feature extraction, requiring only $n$ basis functions to compute the first $n$ features. Kernel feature analysis \citep{kernlab:Olvi:2000} is computationally simple and scales approximately one order of magnitude better on large data sets than standard KPCA. Choosing $\Omega [f] = \sum_{i=1}^m |\alpha_i |$ this yields
\begin{equation}
F_{LP} = \{ \mathbf{w} \;\vert\; \mathbf{w} = \sum_{i=1}^m \alpha_i \Phi(x_i) \;\mathrm{with}\; \sum_{i=1}^m |\alpha_i | \leq 1 \}
\end{equation}
This setting leads to the first ``principal vector'' in the $l_1$ context
\begin{equation}
\mathbf{\nu}^1 = \mathrm{argmax}_{\mathbf{\nu} \in F_{LP}} \frac{1}{m} \sum_{i=1}^m \langle \mathbf{\nu},\mathbf{\Phi}(x_i) - \frac{1}{m}\sum_{j=1}^m\mathbf{\Phi}(x_j) \rangle^2
\end{equation}
Subsequent ``principal vectors'' can be defined by enforcing optimality with respect to the remaining orthogonal subspaces. Due to the $l_1$ constraint the solution has the favorable property of being sparse in terms of the coefficients $\alpha_i$.
The function \code{kfa} in \pkg{kernlab} implements Kernel Feature Analysis by using a projection pursuit technique on a sample of the data. Results are then returned in an \proglang{S4} object.
\begin{figure}
\centering
<<kfa>>=
data(promotergene)
f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot",kpar=list(sigma=0.013))
plot(predict(f,promotergene),col=as.numeric(promotergene[,1]),xlab="1st Feature",ylab="2nd Feature")
@
\caption{Projection of the promoter gene data on two features using an RBF kernel}
\label{fig:KFA}
\end{figure}

\subsection{Kernel canonical correlation analysis}

Canonical correlation analysis (CCA) is concerned with describing the linear relations between variables. If we have two data sets $x_1$ and $x_2$, then classical CCA attempts to find linear combinations of the variables which give the maximum correlation between the combinations. I.e., if
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}^\top\mathbf{x_1} = \sum_j w_{1j} x_{1j} \\
&& y_2 = \mathbf{w_2}^\top\mathbf{x_2} = \sum_j w_{2j} x_{2j}
\end{eqnarray*}
one wishes to find those values of $\mathbf{w_1}$ and $\mathbf{w_2}$ which maximize the correlation between $y_1$ and $y_2$. Similarly to the KPCA algorithm, CCA can be extended and used in a dot product space~$F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$ as
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}^\top\mathbf{\Phi(x_1)} = \sum_j w_{1j} \Phi(x_{1j}) \\
&& y_2 = \mathbf{w_2}^\top\mathbf{\Phi(x_2)} = \sum_j w_{2j} \Phi(x_{2j})
\end{eqnarray*}
Following \citet{kernlab:kuss:2003}, the \pkg{kernlab} implementation of KCCA projects the data vectors on a new coordinate system using KPCA and uses linear CCA to retrieve the correlation coefficients. The \code{kcca} method in \pkg{kernlab} returns an \proglang{S4} object containing the correlation coefficients for each data set and the corresponding correlation along with the kernel used.

\subsection{Interior point code quadratic optimizer}

In many kernel based algorithms, learning implies the minimization of some risk function. Typically we have to deal with quadratic or general convex problems for support vector machines of the type
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & f(x) \\
\mbox{subject to~} & c_i(x) \leq 0 \mbox{~for all~} i \in [n].
\end{array}
\end{equation}
$f$ and $c_i$ are convex functions and $n \in \mathbf{N}$.
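Before introducing the optimizer itself, a minimal usage sketch may help. The \code{ipop} solver described next accepts the quadratic program in the box-constrained form shown below; the toy matrices here are made up purely for illustration.
<<>>=
## minimize c'x + 1/2 x'Hx  subject to  b <= Ax <= b + r  and  l <= x <= u
## (toy problem, for illustration only)
H <- diag(2)              ## trivially convex 2x2 Hessian
c <- c(-1, -2)
A <- matrix(c(1, 1), 1)   ## single constraint x1 + x2 = 1, since r = 0
b <- 1; r <- 0
l <- c(0, 0); u <- c(10, 10)
qp <- ipop(c, H, A, b, l, u, r)
primal(qp)                ## the primal solution vector
@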
\pkg{kernlab} provides the \proglang{S4} method \code{ipop} implementing an optimizer of the interior point family \citep{kernlab:Vanderbei:1999} which solves the quadratic programming problem
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & c^\top x+\frac{1}{2}x^\top H x \\
\mbox{subject to~} & b \leq Ax \leq b + r\\
& l \leq x \leq u \\
\end{array}
\end{equation}
This optimizer can be used in regression, classification, and novelty detection in SVMs.

\subsection{Incomplete Cholesky decomposition}

When dealing with kernel based algorithms, calculating a full kernel matrix should be avoided since it is already an $O(N^2)$ operation. Fortunately, the fact that kernel matrices are positive semidefinite is a strong constraint and good approximations can be found with small computational cost. The Cholesky decomposition factorizes a positive semidefinite $N \times N$ matrix $K$ as $K=ZZ^T$, where $Z$ is an upper triangular $N \times N$ matrix. Exploiting the fact that kernel matrices are usually of low rank, an \emph{incomplete Cholesky decomposition} \citep{kernlab:Wright:1999} finds a matrix $\tilde{Z}$ of size $N \times M$ where $M\ll N$ such that the norm of $K-\tilde{Z}\tilde{Z}^T$ is smaller than a given tolerance $\theta$. The main difference between the incomplete Cholesky decomposition and the standard Cholesky decomposition is that pivots which are below a certain threshold are simply skipped. If $L$ is the number of skipped pivots, we obtain a $\tilde{Z}$ with only $M = N - L$ columns. The algorithm works by picking a column from $K$ to be added by maximizing a lower bound on the reduction of the error of the approximation. \pkg{kernlab} has an implementation of an incomplete Cholesky factorization called \code{inchol} which computes the decomposed matrix $\tilde{Z}$ from the original data for any given kernel without the need to compute a full kernel matrix beforehand. This has the advantage that no full kernel matrix has to be stored in memory.

\section{Conclusions}

In this paper we described \pkg{kernlab}, a flexible and extensible kernel methods package for \proglang{R} containing modern kernel algorithms along with tools for constructing new kernel based algorithms. It provides a unified framework for using and creating kernel-based algorithms in \proglang{R} while using all of \proglang{R}'s modern facilities, like \proglang{S4} classes and namespaces. Our aim for the future is to extend the package and add more kernel-based methods as well as kernel relevant tools. Sources and binaries for the latest version of \pkg{kernlab} are available at CRAN\footnote{\url{http://CRAN.R-project.org}} under the GNU General Public License.

A shorter version of this introduction to the \proglang{R} package \pkg{kernlab} is published as \cite{kernlab:Karatzoglou+Smola+Hornik:2004} in the \emph{Journal of Statistical Software}.

\bibliography{jss}

\end{document}
kernlab/src/0000755000176000001440000000000012651720434012513 5ustar ripleyuserskernlab/src/iweightfactory.h0000644000176000001440000000323312651720731015715 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied.
See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_WeightFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef I_WEIGHTFACTORY_H #define I_WEIGHTFACTORY_H #include "datatype.h" #include "errorcode.h" /// Weight Factory interface for string kernel class I_WeightFactory { public: /// Constructor I_WeightFactory(){} /// Destructor virtual ~I_WeightFactory(){} /// Compute edge weight between floor interval and the end of matched substring. virtual ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) = 0; }; #endif kernlab/src/stringkernel.h0000644000176000001440000000543312651720731015400 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/StringKernel.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 10 Aug 2006 #ifndef STRINGKERNEL_H #define STRINGKERNEL_H #include "datatype.h" #include "errorcode.h" #include "esa.h" #include "isafactory.h" #include "ilcpfactory.h" #include "iweightfactory.h" //#include "W_msufsort.h" #include "wkasailcp.h" #include "cweight.h" #include "expdecayweight.h" #include "brweight.h" #include "kspectrumweight.h" //' Types of substring weighting functions enum WeightFunction{CONSTANT, EXPDECAY, KSPECTRUM, BOUNDRANGE}; using namespace std; class StringKernel { public: /// Variables ESA *esa; I_WeightFactory *weigher; Real *val; //' val array. Storing precomputed val(t) values. Real *lvs; //' leaves array. Storing weights for leaves. 
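//' Note: val[] is filled by PrecomputeVal() and lvs[] by Set_Lvs()
//' (both declared below), so that repeated calls to Compute_K() can
//' reuse these precomputed quantities rather than recompute them.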
 /// Constructors
 StringKernel();

 //' Given constructed suffix array
 StringKernel(ESA *esa_, int weightfn, Real param, int verb=INFO);

 //' Given text, build suffix array for it
 StringKernel(const UInt32 &size, SYMBOL *text, int weightfn, Real param, int verb=INFO);

 /// Destructor
 virtual ~StringKernel();

 //' Methods

 /// Precompute the contribution of each interval (or internal node)
 void PrecomputeVal();

 /// Compute Kernel matrix
 void Compute_K(SYMBOL *xprime, const UInt32 &xprime_len, Real &value);

 /// Set leaves array, lvs[]
 void Set_Lvs(const Real *leafWeight, const UInt32 *len, const UInt32 &m);

 /// Set leaves array as lvs[i]=i for i=0 to esa->length
 void Set_Lvs();

private:
 int _verb;

 /// An iterative auxiliary function used in PrecomputeVal()
 void IterativeCompute(const UInt32 &left, const UInt32 &right);
};
#endif
kernlab/src/introsort.h0000644000176000001440000001560012651720731014731 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2).
 *
 * The Initial Developer of the Original Code is
 * Michael A. Maniscalco
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Michael A. Maniscalco
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef TERNARY_INTRO_SORT_H
#define TERNARY_INTRO_SORT_H

//======================================================================//
// Class: IntroSort                                                     //
//                                                                      //
// Template based implementation of Introspective sorting algorithm    //
// using a ternary quicksort.                                           //
//                                                                      //
// Author: M.A. Maniscalco                                              //
// Date: January 20, 2005                                               //
//                                                                      //
//======================================================================//

// *** COMPILER WARNING DISABLED ***
// Disable a warning which appears in MSVC
// "conversion from '__w64 int' to ''"
// Just plain annoying ... Restored at end of this file.
#ifdef WIN32
#pragma warning (disable : 4244)
#endif

#define MIN_LENGTH_FOR_QUICKSORT 32
#define MAX_DEPTH_BEFORE_HEAPSORT 128

//=====================================================================
// IntroSort class declaration
// Notes: Any object used with this class must implement the following
// operators: <=, >=, ==
//=====================================================================

template <class T> void IntroSort(T * array, unsigned int count);
template <class T> void Partition(T * left, unsigned int count, unsigned int depth = 0);
template <class T> T SelectPivot(T value1, T value2, T value3);
template <class T> void Swap(T * valueA, T * valueB);
template <class T> void InsertionSort(T * array, unsigned int count);
template <class T> void HeapSort(T * array, int length);
template <class T> void HeapSort(T * array, int k, int N);

template <class T>
inline void IntroSort(T * array, unsigned int count)
{
	// Public method used to invoke the sort.
	// Call quick sort partition method if there are enough
	// elements to warrant it or insertion sort otherwise.
	if (count >= MIN_LENGTH_FOR_QUICKSORT)
		Partition(array, count);
	InsertionSort(array, count);
}

template <class T>
inline void Swap(T * valueA, T * valueB)
{
	// do the ol' "switch-a-me-do" on two values.
	T temp = *valueA;
	*valueA = *valueB;
	*valueB = temp;
}

template <class T>
inline T SelectPivot(T value1, T value2, T value3)
{
	// middle of three method.
	if (value1 < value2)
		return ((value2 < value3) ? value2 : (value1 < value3) ? value3 : value1);
	return ((value1 < value3) ? value1 : (value2 < value3) ? value3 : value2);
}

template <class T>
inline void Partition(T * left, unsigned int count, unsigned int depth)
{
	if (++depth > MAX_DEPTH_BEFORE_HEAPSORT)
	{
		// If enough recursion has happened then we bail to heap sort since it looks
		// as if we are experiencing a 'worst case' for quick sort. This should not
		// happen very often at all.
		HeapSort(left, count);
		return;
	}

	T * right = left + count - 1;
	T * startingLeft = left;
	T * startingRight = right;
	T * equalLeft = left;
	T * equalRight = right;

	// select the pivot value.
	T pivot = SelectPivot(left[0], right[0], left[((right - left) >> 1)]);

	// do three way partitioning.
	do
	{
		while ((left < right) && (*left <= pivot))
			if (*(left++) == pivot)
				Swap(equalLeft++, left - 1);	// equal to pivot value. move to far left.

		while ((left < right) && (*right >= pivot))
			if (*(right--) == pivot)
				Swap(equalRight--, right + 1);	// equal to pivot value. move to far right.

		if (left >= right)
		{
			if (left == right)
			{
				if (*left >= pivot) left--;
				if (*right <= pivot) right++;
			}
			else
			{
				left--;
				right++;
			}
			break;	// done partitioning
		}

		// left and right are ready for swaping
		Swap(left++, right--);
	} while (true);

	// move values that were equal to pivot from the far left into the middle.
	// these values are now placed in their final sorted position.
	if (equalLeft > startingLeft)
		while (equalLeft > startingLeft)
			Swap(--equalLeft, left--);

	// move values that were equal to pivot from the far right into the middle.
	// these values are now placed in their final sorted position.
	if (equalRight < startingRight)
		while (equalRight < startingRight)
			Swap(++equalRight, right++);

	// Calculate new partition sizes ...
	unsigned int leftSize = left - startingLeft + 1;
	unsigned int rightSize = startingRight - right + 1;

	// Partition left (less than pivot) if there are enough values to warrant it
	// otherwise do insertion sort on the values.
	if (leftSize >= MIN_LENGTH_FOR_QUICKSORT)
		Partition(startingLeft, leftSize, depth);

	// Partition right (greater than pivot) if there are enough values to warrant it
	// otherwise do insertion sort on the values.
	if (rightSize >= MIN_LENGTH_FOR_QUICKSORT)
		Partition(right, rightSize, depth);
}

template <class T>
inline void InsertionSort(T * array, unsigned int count)
{
	// A basic insertion sort.
	if (count < 3)
	{
		if ((count == 2) && (array[0] > array[1]))
			Swap(array, array + 1);
		return;
	}

	T * ptr2, * ptr3 = array + 1, * ptr4 = array + count;
	if (array[0] > array[1])
		Swap(array, array + 1);

	while (true)
	{
		while ((++ptr3 < ptr4) && (ptr3[0] >= ptr3[-1]));
		if (ptr3 >= ptr4)
			break;
		if (ptr3[-2] <= ptr3[0])
		{
			if (ptr3[-1] > ptr3[0])
				Swap(ptr3, ptr3 - 1);
		}
		else
		{
			ptr2 = ptr3 - 1;
			T v = *ptr3;
			while ((ptr2 >= array) && (ptr2[0] > v))
			{
				ptr2[1] = ptr2[0];
				ptr2--;
			}
			ptr2[1] = v;
		}
	}
}

template <class T>
inline void HeapSort(T * array, int length)
{
	// A basic heapsort.
	for (int k = length >> 1; k > 0; k--)
		HeapSort(array, k, length);

	do
	{
		Swap(array, array + (--length));
		HeapSort(array, 1, length);
	} while (length > 1);
}

template <class T>
inline void HeapSort(T * array, int k, int N)
{
	// A basic heapsort.
	T temp = array[k - 1];
	int n = N >> 1;
	int j = (k << 1);
	while (k <= n)
	{
		if ((j < N) && (array[j - 1] < array[j]))
			j++;
		if (temp >= array[j - 1])
			break;
		else
		{
			array[k - 1] = array[j - 1];
			k = j;
			j <<= 1;
		}
	}
	array[k - 1] = temp;
}

// Restore the default warning which appears in MSVC for
// warning #4244 which was disabled at top of this file.
#ifdef WIN32
#pragma warning (default : 4244)
#endif

#endif
kernlab/src/Makevars0000644000176000001440000000006012651720731014203 0ustar ripleyusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
kernlab/src/dbreakpt.c0000644000176000001440000000417112651720731014456 0ustar ripleyusersextern double mymin(double, double);
extern double mymax(double, double);

void dbreakpt(int n, double *x, double *xl, double *xu, double *w,
              int *nbrpt, double *brptmin, double *brptmax)
{
/*
c     **********
c
c     Subroutine dbreakpt
c
c     This subroutine computes the number of break-points, and
c     the minimal and maximal break-points of the projection of
c     x + alpha*w on the n-dimensional interval [xl,xu].
c
c     parameters:
c
c       n is an integer variable.
c         On entry n is the number of variables.
c         On exit n is unchanged.
c
c       x is a double precision array of dimension n.
c         On entry x specifies the vector x.
c         On exit x is unchanged.
c
c       xl is a double precision array of dimension n.
c         On entry xl is the vector of lower bounds.
c         On exit xl is unchanged.
c
c       xu is a double precision array of dimension n.
c         On entry xu is the vector of upper bounds.
c         On exit xu is unchanged.
c
c       w is a double precision array of dimension n.
c         On entry w specifies the vector w.
c         On exit w is unchanged.
c
c       nbrpt is an integer variable.
c         On entry nbrpt need not be specified.
c         On exit nbrpt is the number of break points.
c
c       brptmin is a double precision variable
c         On entry brptmin need not be specified.
c         On exit brptmin is minimal break-point.
c
c       brptmax is a double precision variable
c         On entry brptmax need not be specified.
c         On exit brptmax is maximal break-point.
c
c     **********
*/
	int i;
	double brpt;

	*nbrpt = 0;
	for (i=0;i<n;i++)
		if (x[i] < xu[i] && w[i] > 0) {
			(*nbrpt)++;
			brpt = (xu[i] - x[i])/w[i];
			if (*nbrpt == 1)
				*brptmin = *brptmax = brpt;
			else {
				*brptmin = mymin(brpt, *brptmin);
				*brptmax = mymax(brpt, *brptmax);
			}
		}
		else if (x[i] > xl[i] && w[i] < 0) {
			(*nbrpt)++;
			brpt = (xl[i] - x[i])/w[i];
			if (*nbrpt == 1)
				*brptmin = *brptmax = brpt;
			else {
				*brptmin = mymin(brpt, *brptmin);
				*brptmax = mymax(brpt, *brptmax);
			}
		}

	if (*nbrpt == 0)
		*brptmin = *brptmax = 0;
}
kernlab/src/kspectrumweight.h0000644000176000001440000000326212651720731016114 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */


// File    : sask/Code/KSpectrumWeight.h
//
// Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006
//           12 Jul 2006

#ifndef KSPECTRUMWEIGHT_H
#define KSPECTRUMWEIGHT_H

#include "datatype.h"
#include "errorcode.h"
#include "iweightfactory.h"
#include <cmath>  // assumed header: the original angle-bracket include was lost in extraction

//' K-spectrum weight class
class KSpectrumWeight : public I_WeightFactory
{
	Real k;

public:
	/// Constructor
	KSpectrumWeight(const Real & k_=5.0):k(k_) {}

	/// Destructor
	virtual ~KSpectrumWeight(){}

	/// Compute weight
	ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight);
};
#endif
kernlab/src/dtron.c0000644000176000001440000001705712651720731014017 0ustar ripleyusers/* the five angle-bracket includes below were lost in extraction and are
   assumed from the functions used in this file (fabs, memcpy, free,
   F77_CALL wrappers) */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <R_ext/BLAS.h>

extern void *xmalloc(size_t);
extern double mymin(double, double);
extern double mymax(double, double);
extern int ufv(int, double *, double *);
extern int ugrad(int, double *, double *);
extern int uhes(int, double *, double **);
/* LEVEL 1 BLAS */
/*extern double dnrm2_(int *, double *, int *);*/
/*extern double ddot_(int *, double *, int *, double *, int *);*/
/* LEVEL 2 BLAS */
/*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/
/* MINPACK 2 */
extern double dgpnrm(int, double *, double *, double *, double *);
extern void dcauchy(int, double *, double *, double *, double *, double *, double, double *, double *, double *);
extern void dspcg(int, double *, double *, double *, double *, double *, double, double, double *, int *);

void dtron(int n, double *x, double *xl, double *xu, double gtol, double frtol, double fatol, double fmin, int maxfev, double cgtol)
{
/*
c     *********
c
c     Subroutine dtron
c
c     The optimization problem of BSVM is a bound-constrained quadratic
c     optimization problem and its Hessian matrix is positive semidefinite.
c     We modified the optimization solver TRON by Chih-Jen Lin and
c     Jorge More' into this version which is suitable for this
c     special case.
c
c     This subroutine implements a trust region Newton method for the
c     solution of large bound-constrained quadratic optimization problems
c
c        min { f(x)=0.5*x'*A*x + g0'*x : xl <= x <= xu }
c
c     where the Hessian matrix A is dense and positive semidefinite. The
c     user must define functions which evaluate the function, gradient,
c     and the Hessian matrix.
c
c     The user must choose an initial approximation x to the minimizer,
c     lower bounds, upper bounds, quadratic terms, linear terms, and
c     constants about termination criterion.
c
c     parameters:
c
c       n is an integer variable.
c         On entry n is the number of variables.
c         On exit n is unchanged.
c
c       x is a double precision array of dimension n.
c         On entry x specifies the vector x.
c         On exit x is the final minimizer.
c
c       xl is a double precision array of dimension n.
c         On entry xl is the vector of lower bounds.
c         On exit xl is unchanged.
c
c       xu is a double precision array of dimension n.
c         On entry xu is the vector of upper bounds.
c         On exit xu is unchanged.
c
c       gtol is a double precision variable.
c         On entry gtol specifies the relative error of the projected
c         gradient.
c         On exit gtol is unchanged.
c
c       frtol is a double precision variable.
c         On entry frtol specifies the relative error desired in the
c         function.
Convergence occurs if the estimate of the c relative error between f(x) and f(xsol), where xsol c is a local minimizer, is less than frtol. c On exit frtol is unchanged. c c fatol is a double precision variable. c On entry fatol specifies the absolute error desired in the c function. Convergence occurs if the estimate of the c absolute error between f(x) and f(xsol), where xsol c is a local minimizer, is less than fatol. c On exit fatol is unchanged. c c fmin is a double precision variable. c On entry fmin specifies a lower bound for the function. c The subroutine exits with a warning if f < fmin. c On exit fmin is unchanged. c c maxfev is an integer variable. c On entry maxfev specifies the limit of function evaluations. c On exit maxfev is unchanged. c c cgtol is a double precision variable. c On entry gqttol specifies the convergence criteria for c subproblems. c On exit gqttol is unchanged. c c ********** */ /* Parameters for updating the iterates. */ double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75; /* Parameters for updating the trust region size delta. */ double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4; double p5 = 0.5, one = 1; double gnorm, gnorm0, delta, snorm; double alphac = 1, alpha, f, fc, prered, actred, gs; int search = 1, iter = 1, info, inc = 1; double *xc = (double *) xmalloc(sizeof(double)*n); double *s = (double *) xmalloc(sizeof(double)*n); double *wa = (double *) xmalloc(sizeof(double)*n); double *g = (double *) xmalloc(sizeof(double)*n); double *A = NULL; uhes(n, x, &A); ugrad(n, x, g); ufv(n, x, &f); gnorm0 = F77_CALL(dnrm2)(&n, g, &inc); delta = 1000*gnorm0; gnorm = dgpnrm(n, x, xl, xu, g); if (gnorm <= gtol*gnorm0) { /* //printf("CONVERGENCE: GTOL TEST SATISFIED\n"); */ search = 0; } while (search) { /* Save the best function value and the best x. */ fc = f; memcpy(xc, x, sizeof(double)*n); /* Compute the Cauchy step and store in s. */ dcauchy(n, x, xl, xu, A, g, delta, &alphac, s, wa); /* Compute the projected Newton step. */ dspcg(n, x, xl, xu, A, g, delta, cgtol, s, &info); if (ufv(n, x, &f) > maxfev) { /* //printf("ERROR: NFEV > MAXFEV\n"); */ search = 0; continue; } /* Compute the predicted reduction. */ memcpy(wa, g, sizeof(double)*n); F77_CALL(dsymv)("U", &n, &p5, A, &n, s, &inc, &one, wa, &inc); prered = -F77_CALL(ddot)(&n, s, &inc, wa, &inc); /* Compute the actual reduction. */ actred = fc - f; /* On the first iteration, adjust the initial step bound. */ snorm = F77_CALL(dnrm2)(&n, s, &inc); if (iter == 1) delta = mymin(delta, snorm); /* Compute prediction alpha*snorm of the step. */ gs = F77_CALL(ddot)(&n, g, &inc, s, &inc); if (f - fc - gs <= 0) alpha = sigma3; else alpha = mymax(sigma1, -0.5*(gs/(f - fc - gs))); /* Update the trust region bound according to the ratio of actual to predicted reduction. */ if (actred < eta0*prered) /* Reduce delta. Step is not successful. */ delta = mymin(mymax(alpha, sigma1)*snorm, sigma2*delta); else { if (actred < eta1*prered) /* Reduce delta. Step is not sufficiently successful. */ delta = mymax(sigma1*delta, mymin(alpha*snorm, sigma2*delta)); else if (actred < eta2*prered) /* The ratio of actual to predicted reduction is in the interval (eta1,eta2). We are allowed to either increase or decrease delta. */ delta = mymax(sigma1*delta, mymin(alpha*snorm, sigma3*delta)); else /* The ratio of actual to predicted reduction exceeds eta2. Do not decrease delta. */ delta = mymax(delta, mymin(alpha*snorm, sigma3*delta)); } /* Update the iterate. */ if (actred > eta0*prered) { /* Successful iterate. 
*/
			iter++;

			/* uhes(n, x, &A); */
			ugrad(n, x, g);
			gnorm = dgpnrm(n, x, xl, xu, g);
			if (gnorm <= gtol*gnorm0) {
				/* //printf("CONVERGENCE: GTOL = %g TEST SATISFIED\n", gnorm/gnorm0); */
				search = 0;
				continue;
			}
		}
		else {
			/* Unsuccessful iterate. */
			memcpy(x, xc, sizeof(double)*n);
			f = fc;
		}

		/* Test for convergence */
		if (f < fmin) {
			//printf("WARNING: F .LT. FMIN\n");
			search = 0; /* warning */
			continue;
		}
		if (fabs(actred) <= fatol && prered <= fatol) {
			//printf("CONVERGENCE: FATOL TEST SATISFIED\n");
			search = 0;
			continue;
		}
		if (fabs(actred) <= frtol*fabs(f) && prered <= frtol*fabs(f)) {
			/* //printf("CONVERGENCE: FRTOL TEST SATISFIED\n"); */
			search = 0;
			continue;
		}
	}

	free(g);
	free(xc);
	free(s);
	free(wa);
}
kernlab/src/wkasailcp.cpp0000644000176000001440000000452112651720731015177 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */


// File    : sask/Code/W_kasai_lcp.cpp
//
// Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006
//           11 Oct 2006

#ifndef W_KASAI_LCP_CPP
#define W_KASAI_LCP_CPP

#include "wkasailcp.h"
#include <vector>  // assumed header: the original angle-bracket include was lost in extraction

/**
 * Compute LCP array. Algorithm adapted from Manzini's SWAT2004 paper.
 * Modification: array indexing changed from 1-based to 0-based.
 *
 * \param text - (IN) The text which corresponds to SA.
 * \param len  - (IN) Length of text.
 * \param sa   - (IN) Suffix array.
 * \param lcp  - (OUT) Computed LCP array.
 */
ErrorCode
W_kasai_lcp::ComputeLCP(const SYMBOL *text, const UInt32 &len, const UInt32 *sa, LCP& lcp)
{
	//chteo: [111006:0141]
	//std::vector<UInt32> isa(len);
	UInt32 *isa = new UInt32[len];

	//' Step 1: Compute inverse suffix array
	//' (the span from here to "h--" below was lost in extraction; it is
	//'  reconstructed following the linear-time algorithm cited above)
	for(UInt32 i=0; i<len; i++)
		isa[sa[i]] = i;

	//' Step 2: Compute LCP values
	UInt32 h = 0;
	for(UInt32 i=0; i<len; i++) {
		UInt32 k = isa[i];
		if(k == 0) {
			lcp.array[k] = 0;
		}
		else {
			UInt32 j = sa[k-1];
			while(i+h < len && j+h < len && text[i+h] == text[j+h])
				h++;
			lcp.array[k] = h;
		}
		if(h>0) h--;
	}

	//chteo: [111006:0141]
	delete [] isa;
	isa = 0;

	return NOERROR;
}
#endif
kernlab/src/dgpnrm.c0000644000176000001440000000217212651720731014150 0ustar ripleyusers#include <math.h>  /* assumed header (fabs); the original angle-bracket include was lost in extraction */

double dgpnrm(int n, double *x, double *xl, double *xu, double *g)
{
/*
c     **********
c
c     Function dgpnrm
c
c     This function computes the infinite norm of the
c     projected gradient at x.
c
c     parameters:
c
c       n is an integer variable.
c         On entry n is the number of variables.
c         On exit n is unchanged.
c
c       x is a double precision array of dimension n.
c         On entry x specifies the vector x.
c         On exit x is unchanged.
c
c       xl is a double precision array of dimension n.
c         On entry xl is the vector of lower bounds.
c         On exit xl is unchanged.
c
c       xu is a double precision array of dimension n.
c         On entry xu is the vector of upper bounds.
c         On exit xu is unchanged.
c
c       g is a double precision array of dimension n.
c         On entry g specifies the gradient g.
c         On exit g is unchanged.
c
c     **********
*/
	int i;
	double norm = 0;

	/* the condition below was partially lost in extraction; it is
	   reconstructed so that components fixed at an active bound
	   (g >= 0 at a lower bound, g < 0 at an upper bound) are skipped */
	for (i=0;i<n;i++)
		if (!((g[i] < 0 && x[i] == xu[i]) || (g[i] >= 0 && x[i] == xl[i])))
			if (fabs(g[i]) > norm)
				norm = fabs(g[i]);

	return norm;
}
kernlab/src/dtrpcg.c0000644000176000001440000001515112651720731014145 0ustar ripleyusers/* the four angle-bracket includes below were lost in extraction and are
   assumed from the functions used in this file (sqrt, xmalloc/free,
   F77_CALL wrappers) */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <R_ext/BLAS.h>

extern void *xmalloc(size_t);
/* LEVEL 1 BLAS */
/* extern int daxpy_(int *, double *, double *, int *, double *, int *); */
/* extern double ddot_(int *, double *, int *, double *, int *); */
/* extern double dnrm2_(int *, double *, int *); */
/* extern int dscal_(int *, double *, double *, int *); */
/* LEVEL 2 BLAS */
/* extern int dtrsv_(char *, char *, char *, int *, double *, int *, double *, int *); */
/* extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); */
/* MINPACK 2 */
extern void dtrqsol(int, double *, double *, double , double *);

void dtrpcg(int n, double *A, double *g, double delta, double *L,
            double tol, double stol, double *w, int *iters, int *info)
{
/*
c     *********
c
c     Subroutine dtrpcg
c
c     Given a dense symmetric positive semidefinite matrix A, this
c     subroutine uses a preconditioned conjugate gradient method to find
c     an approximate minimizer of the trust region subproblem
c
c        min { q(s) : || L'*s || <= delta }.
c
c     where q is the quadratic
c
c        q(s) = 0.5*s'*A*s + g'*s,
c
c     This subroutine generates the conjugate gradient iterates for
c     the equivalent problem
c
c        min { Q(w) : || w || <= delta }.
c
c     where Q is the quadratic defined by
c
c        Q(w) = q(s),      w = L'*s.
c
c     Termination occurs if the conjugate gradient iterates leave
c     the trust region, a negative curvature direction is generated,
c     or one of the following two convergence tests is satisfied.
c
c     Convergence in the original variables:
c
c        || grad q(s) || <= tol
c
c     Convergence in the scaled variables:
c
c        || grad Q(w) || <= stol
c
c     Note that if w = L'*s, then L*grad Q(w) = grad q(s).
c
c     parameters:
c
c       n is an integer variable.
c         On entry n is the number of variables.
c         On exit n is unchanged.
c
c       A is a double precision array of dimension n*n.
c         On entry A specifies the matrix A.
c         On exit A is unchanged.
c
c       g is a double precision array of dimension n.
c         On entry g must contain the vector g.
c         On exit g is unchanged.
c
c       delta is a double precision variable.
c         On entry delta is the trust region size.
c         On exit delta is unchanged.
c
c       L is a double precision array of dimension n*n.
c         On entry L need not to be specified.
c         On exit the lower triangular part of L contains the matrix L.
c
c       tol is a double precision variable.
c         On entry tol specifies the convergence test
c            in the un-scaled variables.
c         On exit tol is unchanged.
c
c       stol is a double precision variable.
c         On entry stol specifies the convergence test
c            in the scaled variables.
c         On exit stol is unchanged.
c
c       w is a double precision array of dimension n.
c         On entry w need not be specified.
c         On exit w contains the final conjugate gradient iterate.
c
c       iters is an integer variable.
c         On entry iters need not be specified.
c         On exit iters is set to the number of conjugate
c            gradient iterations.
c
c       info is an integer variable.
c         On entry info need not be specified.
c         On exit info is set as follows:
c
c            info = 1  Convergence in the original variables.
c                      || grad q(s) || <= tol
c
c            info = 2  Convergence in the scaled variables.
c                      || grad Q(w) || <= stol
c
c            info = 3  Negative curvature direction generated.
c                      In this case || w || = delta and a direction
c                      of negative curvature w can be recovered by
c                      solving L'*w = p.
c c info = 4 Conjugate gradient iterates exit the c trust region. In this case || w || = delta. c c info = 5 Failure to converge within itermax(n) iterations. c c ********** */ int i, inc = 1; double one = 1, zero = 0, alpha, malpha, beta, ptq, rho; double *p, *q, *t, *r, *z, sigma, rtr, rnorm, rnorm0, tnorm; p = (double *) xmalloc(sizeof(double)*n); q = (double *) xmalloc(sizeof(double)*n); t = (double *) xmalloc(sizeof(double)*n); r = (double *) xmalloc(sizeof(double)*n); z = (double *) xmalloc(sizeof(double)*n); /* Initialize the iterate w and the residual r. Initialize the residual t of grad q to -g. Initialize the residual r of grad Q by solving L*r = -g. Note that t = L*r. */ for (i=0;i 0) alpha = rho/ptq; else alpha = 0; dtrqsol(n, w, p, delta, &sigma); /* Exit if there is negative curvature or if the iterates exit the trust region. */ if (ptq <= 0 || alpha >= sigma) { F77_CALL(daxpy)(&n, &sigma, p, &inc, w, &inc); if (ptq <= 0) *info = 3; else *info = 4; goto return0; } /* Update w and the residuals r and t. Note that t = L*r. */ malpha = -alpha; F77_CALL(daxpy)(&n, &alpha, p, &inc, w, &inc); F77_CALL(daxpy)(&n, &malpha, q, &inc, r, &inc); F77_CALL(daxpy)(&n, &malpha, z, &inc, t,&inc); /* Exit if the residual convergence test is satisfied. */ rtr = F77_CALL(ddot)(&n, r, &inc, r, &inc); rnorm = sqrt(rtr); tnorm = sqrt(F77_CALL(ddot)(&n, t, &inc, t, &inc)); if (tnorm <= tol) { *info = 1; goto return0; } if (rnorm <= stol) { *info = 2; goto return0; } /* Compute p = r + beta*p and update rho. */ beta = rtr/rho; F77_CALL(dscal)(&n, &beta, p, &inc); F77_CALL(daxpy)(&n, &one, r, &inc, p, &inc); rho = rtr; } /* iters > itermax = n */ *info = 5; return0: free(p); free(q); free(r); free(t); free(z); } kernlab/src/inductionsort.h0000644000176000001440000000554312651720731015577 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. 
Maniscalco
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef MSUFSORT_INDUCTION_SORTING_H
#define MSUFSORT_INDUCTION_SORTING_H

#include "introsort.h"

class InductionSortObject
{
public:
	InductionSortObject(unsigned int inductionPosition = 0, unsigned int inductionValue = 0, unsigned int suffixIndex = 0);

	bool operator <= (InductionSortObject & object);

	bool operator == (InductionSortObject & object);

	InductionSortObject& operator = (InductionSortObject & object);

	bool operator >= (InductionSortObject & object);

	bool operator > (InductionSortObject & object);

	bool operator < (InductionSortObject & object);

	unsigned int m_sortValue[2];
};

inline bool InductionSortObject::operator <= (InductionSortObject & object)
{
	if (m_sortValue[0] < object.m_sortValue[0])
		return true;
	else if (m_sortValue[0] == object.m_sortValue[0])
		return (m_sortValue[1] <= object.m_sortValue[1]);
	return false;
}

inline bool InductionSortObject::operator == (InductionSortObject & object)
{
	return ((m_sortValue[0] == object.m_sortValue[0]) && (m_sortValue[1] == object.m_sortValue[1]));
}

inline bool InductionSortObject::operator >= (InductionSortObject & object)
{
	if (m_sortValue[0] > object.m_sortValue[0])
		return true;
	else if (m_sortValue[0] == object.m_sortValue[0])
		return (m_sortValue[1] >= object.m_sortValue[1]);
	return false;
}

inline InductionSortObject & InductionSortObject::operator = (InductionSortObject & object)
{
	m_sortValue[0] = object.m_sortValue[0];
	m_sortValue[1] = object.m_sortValue[1];
	return *this;
}

inline bool InductionSortObject::operator > (InductionSortObject & object)
{
	if (m_sortValue[0] > object.m_sortValue[0])
		return true;
	else if (m_sortValue[0] == object.m_sortValue[0])
		return (m_sortValue[1] > object.m_sortValue[1]);
	return false;
}

inline bool InductionSortObject::operator < (InductionSortObject & object)
{
	if (m_sortValue[0] < object.m_sortValue[0])
		return true;
	else if (m_sortValue[0] == object.m_sortValue[0])
		return (m_sortValue[1] < object.m_sortValue[1]);
	return false;
}
#endif
kernlab/src/ctable.cpp0000644000176000001440000000661712651720731014463 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */


// File    : sask/Code/ChildTable.cpp
//
// Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006

#ifndef CTABLE_CPP
#define CTABLE_CPP

#include "ctable.h"
#include <cassert>  // assumed header (assert is used below); the original angle-bracket include was lost in extraction

/**
 * Return the value of idx-th "up" field of child table.
 *   val = childtab[idx -1];
 *
 * \param idx - (IN) The index of child table.
 * \param val - (OUT) The value of idx-th entry in child table's "up" field.
 */
ErrorCode
ChildTable::up(const UInt32 &idx, UInt32 &val){
	if(idx == size()) {
		// Special case: To get the first 0-index
		val = (*this)[idx-1];
		return NOERROR;
	}

	// svnvish: BUGBUG
	// Do we need to do this in production code?
	UInt32 lcp_idx = 0, lcp_prev_idx = 0;
	lcp_idx = _lcptab[idx];
	lcp_prev_idx = _lcptab[idx-1];
	assert(lcp_prev_idx > lcp_idx);
	val = (*this)[idx-1];
	return NOERROR;
}

/**
 * Return the value of idx-th "down" field of child table. Deprecated.
 *   Instead use val = childtab[idx];
 *
 * \param idx - (IN) The index of child table.
 * \param val - (OUT) The value of idx-th entry in child table's "down" field.
 */
ErrorCode
ChildTable::down(const UInt32 &idx, UInt32 &val){
	// For a l-interval, l-[i..j], childtab[i].down == childtab[j+1].up
	// If l-[i..j] is last child-interval of its parent OR 0-[0..n],
	//   childtab[i].nextlIndex == childtab[i].down

	// svnvish: BUGBUG
	// Do we need to do this in production code?
	// UInt32 lcp_idx = 0, lcp_nextidx = 0;
	// lcp_nextidx = _lcptab[(*this)[idx]];
	// lcp_idx = _lcptab[idx];
	// assert(lcp_nextidx > lcp_idx);

	// childtab[i].down := childtab[i].nextlIndex
	val = (*this)[idx];
	return NOERROR;
}

/**
 * Return the first l-index of a given l-[i..j] interval.
 *
 * \param i   - (IN) Left bound of l-[i..j]
 * \param j   - (IN) Right bound of l-[i..j]
 * \param idx - (OUT) The first l-index.
 */
ErrorCode
ChildTable::l_idx(const UInt32 &i, const UInt32 &j, UInt32 &idx){
	UInt32 up = (*this)[j];
	if(i < up && up <= j){
		idx = up;
	}else {
		idx = (*this)[i];
	}
	return NOERROR;
}

/**
 * Dump array elements to output stream
 *
 * \param os - (IN) Output stream.
 * \param ct - (IN) ChildTable object.
 */
std::ostream& operator << (std::ostream& os, const ChildTable& ct){
	for( UInt32 i = 0; i < ct.size(); i++ ){
		os << "ct[ " << i << "]: " << ct[i] << std::endl;
	}
	return os;
}
#endif
kernlab/src/misc.c0000644000176000001440000000055312651720731013615 0ustar ripleyusers/* assumed headers (malloc, fabs); the original angle-bracket includes were
   lost in extraction */
#include <stdlib.h>
#include <math.h>

void *xmalloc(size_t size)
{
	void *ptr = (void *) malloc(size);
	return ptr;
}

double mymax(double a, double b)
{
	if (a > b)
		return a;
	return b;
}

double mymin(double a, double b)
{
	if (a < b)
		return a;
	return b;
}

double sign(double a, double b)
{
	if (b >= 0)
		return fabs(a);
	return -fabs(a);
}
kernlab/src/expdecayweight.h0000644000176000001440000000342012651720731015675 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */


// File    : sask/Code/ExpDecayWeight.h
//
// Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006
//           12 Jul 2006

#ifndef EXPDECAYWEIGHT_H
#define EXPDECAYWEIGHT_H

#include "datatype.h"
#include "errorcode.h"
#include "iweightfactory.h"
#include <cmath>  // assumed header: the original angle-bracket include was lost in extraction

class ExpDecayWeight : public I_WeightFactory
{
public:
	Real lambda;

	/// Constructors
	//' NOTE: lambda shouldn't be equal to 1, otherwise there will be
	//' divide-by-zero error.
	ExpDecayWeight(const Real &lambda_=2.0):lambda(lambda_) {}

	/// Destructor
	virtual ~ExpDecayWeight(){}

	/// Compute weight
	ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight);
};

#endif
kernlab/src/dcauchy.c0000644000176000001440000001147612651720731014310 0ustar ripleyusers/* the two angle-bracket includes below were lost in extraction and are
   assumed from the functions used in this file (free, F77_CALL wrappers) */
#include <stdlib.h>
#include <R_ext/BLAS.h>

extern void *xmalloc(size_t);
/* LEVEL 1 BLAS */
/* extern double ddot_(int *, double *, int *, double *, int *);
   extern double dnrm2_(int *, double *, int *); */
/* LEVEL 2 BLAS */
/* extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); */
/* MINPACK 2 */
extern void dbreakpt(int, double *, double *, double *, double *, int *, double *, double *);
extern void dgpstep(int, double *, double *, double *, double, double *, double *);

void dcauchy(int n, double *x, double *xl, double *xu, double *A,
             double *g, double delta, double *alpha, double *s)
{
/*
c     **********
c
c     Subroutine dcauchy
c
c     This subroutine computes a Cauchy step that satisfies a trust
c     region constraint and a sufficient decrease condition.
c
c     The Cauchy step is computed for the quadratic
c
c        q(s) = 0.5*s'*A*s + g'*s,
c
c     where A is a symmetric matrix, and g is a vector. Given a
c     parameter alpha, the Cauchy step is
c
c        s[alpha] = P[x - alpha*g] - x,
c
c     with P the projection onto the n-dimensional interval [xl,xu].
c     The Cauchy step satisfies the trust region constraint and the
c     sufficient decrease condition
c
c        || s || <= delta,      q(s) <= mu_0*(g'*s),
c
c     where mu_0 is a constant in (0,1).
c
c     parameters:
c
c       n is an integer variable.
c         On entry n is the number of variables.
c         On exit n is unchanged.
c
c       x is a double precision array of dimension n.
c         On entry x specifies the vector x.
c         On exit x is unchanged.
c
c       xl is a double precision array of dimension n.
c         On entry xl is the vector of lower bounds.
c         On exit xl is unchanged.
c
c       xu is a double precision array of dimension n.
c         On entry xu is the vector of upper bounds.
c         On exit xu is unchanged.
c
c       A is a double precision array of dimension n*n.
c         On entry A specifies the matrix A.
c         On exit A is unchanged.
c
c       g is a double precision array of dimension n.
c         On entry g specifies the gradient g.
c         On exit g is unchanged.
c
c       delta is a double precision variable.
c         On entry delta is the trust region size.
c         On exit delta is unchanged.
c
c       alpha is a double precision variable.
c         On entry alpha is the current estimate of the step.
c         On exit alpha defines the Cauchy step s[alpha].
c
c       s is a double precision array of dimension n.
c         On entry s need not be specified.
c         On exit s is the Cauchy step s[alpha].
c
c     **********
*/
	double one = 1, zero = 0;

	/* Constant that defines sufficient decrease.
	   Interpolation and extrapolation factors.
	*/
	double mu0 = 0.01, interpf = 0.1, extrapf = 10;

	int search, interp, nbrpt, nsteps = 1, i, inc = 1;
	double alphas, brptmax, brptmin, gts, q;
	double *wa = (double *) xmalloc(sizeof(double)*n);

	/* Find the minimal and maximal break-point on x - alpha*g. */
	/* (the span from here to the first dnrm2 call was lost in extraction;
	   it is reconstructed following the TRON reference code) */
	for (i=0;i<n;i++) wa[i] = -g[i];
	dbreakpt(n, x, xl, xu, wa, &nbrpt, &brptmin, &brptmax);

	/* Evaluate the initial alpha and decide if the algorithm
	   must interpolate or extrapolate. */
	dgpstep(n, x, xl, xu, -(*alpha), g, s);
	if (F77_CALL(dnrm2)(&n, s, &inc) > delta)
		interp = 1;
	else {
		F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc);
		gts = F77_CALL(ddot)(&n, g, &inc, s, &inc);
		q = 0.5*F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts;
		interp = q >= mu0*gts ? 1 : 0;
	}

	/* Either interpolate or extrapolate to find a successful step. */
	if (interp) {

		/* Reduce alpha until a successful step is found. */
		search = 1;
		while (search) {

			/* This is a crude interpolation procedure that
			   will be replaced in future versions of the code. */
			nsteps++;
			(*alpha) *= interpf;
			dgpstep(n, x, xl, xu, -(*alpha), g, s);
			if (F77_CALL(dnrm2)(&n, s, &inc) <= delta) {
				F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc);
				gts = F77_CALL(ddot)(&n, g, &inc, s, &inc);
				q = 0.5 * F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts;
				search = q > mu0*gts ? 1 : 0;
			}
		}
	}
	else {
		search = 1;
		alphas = *alpha;

		/* Increase alpha until a successful step is found. */
		while (search && (*alpha) <= brptmax) {

			/* This is a crude extrapolation procedure that
			   will be replaced in future versions of the code. */
			nsteps++;
			alphas = *alpha;
			(*alpha) *= extrapf;
			dgpstep(n, x, xl, xu, -(*alpha), g, s);
			if (F77_CALL(dnrm2)(&n, s, &inc) <= delta) {
				F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc);
				gts = F77_CALL(ddot)(&n, g, &inc, s, &inc);
				q = 0.5 * F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts;
				search = q < mu0*gts ? 1 : 0;
			}
			else
				search = 0;
		}
		*alpha = alphas;
		dgpstep(n, x, xl, xu, -(*alpha), g, s);
	}

	free(wa);
}
kernlab/src/wmsufsort.cpp0000644000176000001440000000442512651720731015275 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Suffix Array based String Kernel.
 *
 * The Initial Developer of the Original Code is
 * Statistical Machine Learning Program (SML), National ICT Australia (NICTA).
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 *   Choon Hui Teo
 *   S V N Vishwanathan
 *
 * ***** END LICENSE BLOCK ***** */


// File    : sask/Code/W_msufsort.cpp
//
// Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au)
//           S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au)
//
// Created : 09 Feb 2006
//
// Updated : 24 Apr 2006

//' Wrapper for Michael Maniscalco's MSufSort version 2.2 algorithm

#ifndef W_MSUFSORT_CPP
#define W_MSUFSORT_CPP

/* the three angle-bracket includes below were lost in extraction and are
   assumed from the functions used in this file (assert, memcpy) */
#include <cassert>
#include <cstring>
#include <cstdio>
#include "wmsufsort.h"

W_msufsort::W_msufsort() {
	msuffixsorter = new MSufSort();
}

W_msufsort::~W_msufsort() {
	delete msuffixsorter;
}

/**
 * Construct Suffix Array using Michael Maniscalco's algorithm
 *
 * \param _text - (IN) The text which resultant SA corresponds to.
 * \param _len  - (IN) The length of the text.
 * \param _sa   - (OUT) Suffix array instance.
*/ ErrorCode W_msufsort::ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array){ //' A temporary copy of text SYMBOL *text_copy = new SYMBOL[len]; //' chteo: BUGBUG //' redundant? assert(text_copy != NULL); memcpy(text_copy, text, sizeof(SYMBOL) * len); msuffixsorter->Sort(text_copy, len); //' Code adapted from MSufSort::verifySort() for (UInt32 i = 0; i < len; i++) { UInt32 tmp = msuffixsorter->ISA(i)-1; array[tmp] = i; } //' Deallocate the memory allocated for #text_copy# delete [] text_copy; return NOERROR; } #endif kernlab/src/dspcg.c0000644000176000001440000001617212651720731013766 0ustar ripleyusers#include #include extern void *xmalloc(size_t); extern double mymin(double, double); extern double mymax(double, double); /* LEVEL 1 BLAS */ /*extern double dnrm2_(int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /*extern void dtrsv_(char *, char *, char *, int *, double *, int *, double *, int *);*/ /* MINPACK 2 */ extern void dprsrch(int, double *, double *, double *, double *, double *, double *); extern double dprecond(int, double *, double *); extern void dtrpcg(int, double*, double *, double, double *, double, double, double *, int *, int *); void dspcg(int n, double *x, double *xl, double *xu, double *A, double *g, double delta, double rtol, double *s, int *info) { /* c ********* c c Subroutine dspcg c c This subroutine generates a sequence of approximate minimizers c for the subproblem c c min { q(x) : xl <= x <= xu }. c c The quadratic is defined by c c q(x[0]+s) = 0.5*s'*A*s + g'*s, c c where x[0] is a base point provided by the user, A is a symmetric c positive semidefinite dense matrix, and g is a vector. c c At each stage we have an approximate minimizer x[k], and generate c a direction p[k] by solving the subproblem c c min { q(x[k]+p) : || p || <= delta, s(fixed) = 0 }, c c where fixed is the set of variables fixed at x[k], delta is the c trust region bound. c c B = A(free:free), c c where free is the set of free variables at x[k]. Given p[k], c the next minimizer x[k+1] is generated by a projected search. c c The starting point for this subroutine is x[1] = x[0] + s, where c x[0] is a base point and s is the Cauchy step. c c The subroutine converges when the step s satisfies c c || (g + A*s)[free] || <= rtol*|| g[free] || c c In this case the final x is an approximate minimizer in the c face defined by the free variables. c c The subroutine terminates when the trust region bound does c not allow further progress, that is, || L'*p[k] || = delta. c In this case the final x satisfies q(x) < q(x[k]). c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is the final minimizer. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g must contain the vector g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c rtol is a double precision variable. 
c On entry rtol specifies the accuracy of the final minimizer. c On exit rtol is unchanged. c c s is a double precision array of dimension n. c On entry s is the Cauchy step. c On exit s contain the final step. c c info is an integer variable. c On entry info need not be specified. c On exit info is set as follows: c c info = 1 Convergence. The final step s satisfies c || (g + A*s)[free] || <= rtol*|| g[free] ||, c and the final x is an approximate minimizer c in the face defined by the free variables. c c info = 2 Termination. The trust region bound does c not allow further progress. */ int i, j, nfaces, nfree, inc = 1, infotr, iters = 0, itertr; double gfnorm, gfnormf, stol = 1e-16, alpha; double one = 1, zero = 0; double *B = (double *) xmalloc(sizeof(double)*n*n); double *L = (double *) xmalloc(sizeof(double)*n*n); double *w = (double *) xmalloc(sizeof(double)*n); double *wa = (double *) xmalloc(sizeof(double)*n); double *wxl = (double *) xmalloc(sizeof(double)*n); double *wxu = (double *) xmalloc(sizeof(double)*n); int *indfree = (int *) xmalloc(sizeof(int)*n); double *gfree = (double *) xmalloc(sizeof(double)*n); /* Compute A*(x[1] - x[0]) and store in w. */ F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, w, &inc); /* Compute the Cauchy point. */ for (j=0;j * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/DataType.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef DATATYPE_H #define DATATYPE_H // #define UInt32 unsigned int // #define UInt64 unsigned long long // #define Byte1 unsigned char // #define Byte2 unsigned short // #define Real double typedef unsigned int UInt32; // Seems that even using __extension__ g++ 4.6 will complain that // ISO C++ 1998 does not support 'long long' ... /* #if defined __GNUC__ && __GNUC__ >= 2 __extension__ typedef unsigned long long UInt64; #else typedef unsigned long long UInt64; #endif */ #include typedef uint64_t UInt64; typedef unsigned char Byte1; typedef unsigned short Byte2; typedef double Real; // #define SENTINEL '\n' // #define SENTINEL2 '\0' const char SENTINEL = '\n'; const char SENTINEL2 = '\0'; #ifndef UNICODE // # define SYMBOL Byte1 typedef Byte1 SYMBOL; #else // # define SYMBOL Byte2 typedef Byte2 SYMBOL; #endif #endif kernlab/src/lcp.h0000644000176000001440000000452512651720731013450 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/LCP.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef LCP_H #define LCP_H #include "datatype.h" #include "errorcode.h" #include #include #include #include #include /** * LCP array class */ class LCP { private: /// Compacted array /* std::vector _p_array; */ /* std::vector _idx_array; */ /* std::vector _val_array; */ Byte1 *_p_array; UInt32 *_idx_array; UInt32 *_val_array; UInt32 _size; bool _is_compact; UInt32 *_beg; UInt32 *_end; UInt32 *_cache; /* typedef std::vector::const_iterator const_itr; */ /* const_itr _beg; */ /* const_itr _end; */ /* const_itr _cache; */ UInt32 _dist; public: /// Original array - 4bytes //std::vector array; UInt32 *array; /// Constructors LCP(const UInt32 &size); /// Destructors virtual ~LCP(); /// Methods /// Compact 4n bytes array into (1n+8p) bytes arrays ErrorCode compact(void); /// Retrieve lcp array value // ErrorCode lcp(const UInt32 &idx, UInt32 &value); UInt32 operator[] (const UInt32& idx); friend std::ostream& operator << (std::ostream& os, LCP& lcp); }; #endif kernlab/src/stringk.c0000644000176000001440000001100612651720731014336 0ustar ripleyusers#include #include #include #include #include #include #include #include #include #include double ***cache ; double kaux (const char *u, int p, const char *v, int q, int n, double lambda) { register int j; double tmp; /* case 1: if a full substring length is processed, return*/ if (n == 0) return (1.0); /* check, if the value was already computed in a previous computation */ if (cache [n] [p] [q] != -1.0) return (cache [n] [p] [q]); /* case 2: at least one substring is to short */ if (p < n || q < n) return (0.0); /* case 3: recursion */ for (j= 0, tmp = 0; j < q; j++) { if (v [j] == u [p - 1]) tmp += kaux (u, p - 1, v, j, n - 1, lambda) * pow (lambda, (float) (q - j + 1)); } cache [n] [p] [q] = lambda * kaux (u, p - 1, v, q, n, lambda) + tmp; return (cache [n] [p] [q]); } double seqk (const char *u, int p, const char *v, int q, int n, double lambda) { register int j; double kp; /* the simple case: (at least) one string is to short */ if (p < n || q < n) return (0.0); /* the recursion: use kaux for the t'th substrings*/ for (j = 0, kp = 0.0; j < q; j++) { if (v [j] == u [p - 1]) kp += kaux (u, p - 1, v, j, n - 1, lambda) * lambda * lambda; } return (seqk (u, p - 1, v, q, n, lambda) + kp); } /* recursively computes the subsequence kernel between s1 and s2 where subsequences of exactly length n are considered */ SEXP subsequencek(SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); int i, j, k; SEXP ret; /* allocate memory for auxiallary cache variable */ cache = (double ***) malloc (n * sizeof (double **)); for (i = 1; i < n; i++) { cache [i] = (double **) malloc (p * sizeof (double *)); for (j = 0; j < p; j++) { cache [i] [j] = (double *) malloc (q * sizeof (double)); for (k = 0; k < q; k++) cache [i] [j] [k] = -1.0; } } PROTECT(ret = allocVector(REALSXP, 1)); /* invoke recursion */ REAL(ret)[0] = seqk (u, p, v, q, n, lambda); /* free memory */ for (i = 1; i < n; i++) { for (j = 0; j < p; j++) free (cache [i] [j]); free (cache [i]); } free (cache); UNPROTECT(1); 
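  /* Note: seqk()/kaux() implement the standard dynamic-programming
     recursion for gap-weighted subsequence kernels (cf. Lodhi et al.,
     "Text Classification using String Kernels", JMLR 2002); with the
     memoisation in cache[n][p][q] the cost is O(n * p * q) time and
     space instead of exponential in n. */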
return (ret); } /* computes the substring kernel between s1 and s2 where substrings up to length n are considered */ SEXP fullsubstringk (SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); register int i, j, k; double ret, tmp; SEXP retk; /* computes the substring kernel */ for (ret = 0.0, i = 0; i < p; i++) { for (j = 0; j < q; j++) if (u [i] == v [j]) { for (k = 0, tmp = lambda * lambda; /* starting condition */ (i + k < p) && (j + k < q) && (u [i + k] == v [j + k]) && (k < n); /* stop conditions */ k++, tmp *= (lambda * lambda)) /* update per iteration */ ret += tmp; } } PROTECT(retk = allocVector(REALSXP, 1)); REAL(retk)[0] = ret; UNPROTECT(1); return (retk); } /* computes the substring kernel between s1 and s2 where substrings of exactly length n are considered */ SEXP substringk (SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); SEXP retk; register int i, j, k; double ret, tmp; /* computes the substring kernel */ for (ret = 0.0, i = 0; i < p; i++) { for (j = 0; j < q; j++) { for (k = 0, tmp = lambda * lambda; /* starting condition */ (i + k < p) && (j + k < q) && (u [i + k] == v [j + k]) && (k < n); /* stop conditions */ k++, tmp *= (lambda * lambda)); /* update per iteration */ if (k == n) ret += tmp; /* update features in case of full match */ } } PROTECT(retk = allocVector(REALSXP, 1)); REAL(retk)[0] = ret; UNPROTECT(1); return (retk); } kernlab/src/dtrqsol.c0000644000176000001440000000333612651720731014354 0ustar ripleyusers#include #include extern double mymax(double, double); /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *);*/ void dtrqsol(int n, double *x, double *p, double delta, double *sigma) { /* c ********** c c Subroutine dtrqsol c c This subroutine computes the largest (non-negative) solution c of the quadratic trust region equation c c ||x + sigma*p|| = delta. c c The code is only guaranteed to produce a non-negative solution c if ||x|| <= delta, and p != 0. If the trust region equation has c no solution, sigma = 0. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x must contain the vector x. c On exit x is unchanged. c c p is a double precision array of dimension n. c On entry p must contain the vector p. c On exit p is unchanged. c c delta is a double precision variable. c On entry delta specifies the scalar delta. c On exit delta is unchanged. c c sigma is a double precision variable. c On entry sigma need not be specified. c On exit sigma contains the non-negative solution. c c ********** */ int inc = 1; double dsq = delta*delta, ptp, ptx, rad, xtx; ptx = F77_CALL(ddot)(&n, p, &inc, x, &inc); ptp = F77_CALL(ddot)(&n, p, &inc, p, &inc); xtx = F77_CALL(ddot)(&n, x, &inc, x, &inc); /* Guard against abnormal cases. 
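     The step is the largest root of the quadratic
         ptp*sigma^2 + 2*ptx*sigma + (xtx - dsq) = 0,
     namely sigma = (rad - ptx)/ptp with
         rad = sqrt(ptx^2 + ptp*(dsq - xtx)).
     When ptx > 0 the algebraically equivalent form
     (dsq - xtx)/(ptx + rad) is used below to avoid cancellation
     between rad and ptx.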
*/ rad = ptx*ptx + ptp*(dsq - xtx); rad = sqrt(mymax(rad, 0)); if (ptx > 0) *sigma = (dsq - xtx)/(ptx + rad); else if (rad > 0) *sigma = (rad - ptx)/ptp; else *sigma = 0; } kernlab/src/esa.cpp0000644000176000001440000007435512651720731014005 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ESA.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef ESA_CPP #define ESA_CPP #include #include #include #include #include #include #include #include #include "esa.h" #ifdef SSARRAY #ifdef __cplusplus extern "C" { #endif #include "sarray.h" #ifdef __cplusplus } #endif #else #include "wmsufsort.h" #endif #include "wkasailcp.h" #define MIN(x,y) (((x) < (y)) ? (x):(y)) ESA::ESA(const UInt32 & size_, SYMBOL *text_, int verb): _verb(verb), size(size_), text(text_), suftab(0), lcptab(size_), childtab(size_, lcptab), suflink(0), bcktab_depth(0), bcktab_size(0), bcktab_val(0), bcktab_key4(0), coef4(0), bcktab_key8(0), coef8(0) { I_SAFactory* sa_fac = 0; I_LCPFactory* lcp_fac = 0; //' input validation assert(size > 0); // if(text[size-1] != SENTINEL) // text = (SYMBOL*)(std::string(text)+SENTINEL).c_str(); assert(text[size-1] == SENTINEL); // CW Sanity test for (int i = 0; i < size-1 ; i++) { assert(text[i] != 0); } // for (int i = 0; i < size ; i++) { // printf("%c : %i\n", text[i], (int) text[i]); // } #if SSARRAY suftab = new int[size]; for (int i = 0; i < size - 1 ; i++) { suftab[i] = text[i]; } suftab[size-1] = 0; ssarray((int*) suftab); #else //' Construct Suffix Array if(!sa_fac){ sa_fac = new W_msufsort(); } // CW Try // size = 10; // text[size-1] = 0; suftab = new UInt32[size]; sa_fac->ConstructSA(text, size, suftab); if(sa_fac) { delete sa_fac; sa_fac = NULL; } #endif //' Compute LCP array if(!lcp_fac){ lcp_fac = new W_kasai_lcp(); } // CW lcp_fac->ComputeLCP(text, size, suftab, lcptab); lcp_fac->ComputeLCP(text, size, (UInt32 *) suftab, lcptab); if(lcp_fac) { delete lcp_fac; lcp_fac = NULL; } //' Compress LCP array lcptab.compact(); //' Construct Child Table ConstructChildTable(); #ifdef SLINK //' Construct Suffix link table //' The suffix link interval, (l-1)-[p..q] of interval l-[i..j] can be retrieved //' by following method: //' Let k be the firstlIndex of l-[i..j], p = suflink[2*k], q = suflink[2*k+1]. suflink = new UInt32[2 * size + 2]; //' extra space for extra sentinel char! 
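  //' (The table costs two UInt32 entries, i.e. 8 bytes, per text
  //' position. When SLINK is not defined, the bucket table below is
  //' built instead for inputs of at least 1024 symbols, trading this
  //' memory for a search per suffix-link lookup.)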
memset(suflink,0,sizeof(UInt32)*(2 * size +2)); ConstructSuflink(); #else //' Threshold for constructing bucket table if(size >= 1024) ConstructBcktab(); //' Otherwise, just do plain binary search to search for suffix link interval #endif } ESA::~ESA() { //if(text) { delete text; text = 0;} if(suflink) { delete [] suflink; suflink=0; } if(suftab) { delete [] suftab; suftab=0; } if(bcktab_val) { delete [] bcktab_val; bcktab_val=0; } if(bcktab_key4) { delete [] bcktab_key4; bcktab_key4=0;} if(coef4) { delete [] coef4; coef4 = 0; } if(bcktab_key8) { delete [] bcktab_key8; bcktab_key8=0;} if(coef8) { delete [] coef8; coef8 = 0; } } /// The lcp-interval structure. Used in ESA::ConstructChildTable() class lcp_interval { public: UInt32 lcp; UInt32 lb; UInt32 rb; std::vector child; /// Constructors lcp_interval(){} lcp_interval(const UInt32 &lcp_, const UInt32 lb_, const UInt32 &rb_, lcp_interval *itv) { lcp = lcp_; lb = lb_; rb = rb_; if(itv) child.push_back(itv); } /// Destructor ~lcp_interval(){ for(UInt32 i=0; i< child.size(); i++) delete child[i]; child.clear(); } }; /** * Construct 3-fields-merged child table. */ ErrorCode ESA::ConstructChildTable(){ // Input validation assert(text); assert(suftab); //' stack for lcp-intervals std::stack lit; //' Refer to: Abo05::Algorithm 4.5.2. lcp_interval *lastInterval = 0; lcp_interval *new_itv = 0; lit.push(new lcp_interval(0, 0, 0, 0)); //' root interval // Variables to handle 0-idx bool first = true; UInt32 prev_0idx = 0; UInt32 first0idx = 0; // Loop thru and process each index. for(UInt32 idx = 1; idx < size + 1; idx++) { UInt32 tmp_lb = idx - 1; //svnvish: BUGBUG // We just assume that the lcp of size + 1 is zero. // This simplifies the logic of the code UInt32 lcp_idx = 0; if(idx < size){ lcp_idx = lcptab[idx]; } while (lcp_idx < lit.top()->lcp){ lastInterval = lit.top(); lit.pop(); lastInterval->rb = idx - 1; // svnvish: Begin process UInt32 n_child = lastInterval->child.size(); UInt32 i = lastInterval->lb; UInt32 j = lastInterval->rb; // idx -1 ? //Step 1: Set childtab[i].down or childtab[j+1].up to first l-index UInt32 first_l_index = i+1; if(n_child && (lastInterval->child[0]->lb == i)) first_l_index = lastInterval->child[0]->rb+1; //svnvish: BUGBUG // ec = childtab.Set_Up(lastInterval->rb+1, first_l_index); // ec = childtab.Set_Down(lastInterval->lb, first_l_index); childtab[lastInterval->rb] = first_l_index; childtab[lastInterval->lb] = first_l_index; // Now we need to set the NextlIndex fields The main problem here // is that the child intervals might not be contiguous UInt32 ptr = i+1; UInt32 child_count = 0; while(ptr < j){ UInt32 first = j; UInt32 last = j; // Get next child to process if(n_child - child_count){ first = lastInterval->child[child_count]->lb; last = lastInterval->child[child_count]->rb; child_count++; } // Eat away singleton intervals while(ptr < first){ childtab[ptr] = ptr + 1; ptr++; } // Handle an child interval and make appropriate entries in // child table ptr = last + 1; if(last < j){ childtab[first] = ptr; } } //' Free lcp_intervals for(UInt32 child_cnt = 0; child_cnt < n_child; child_cnt++) { delete lastInterval->child[child_cnt]; lastInterval->child[child_cnt] = 0; } // svnvish: End process tmp_lb = lastInterval->lb; if(lcp_idx <= lit.top()->lcp) { lit.top()->child.push_back(lastInterval); lastInterval = 0; } }// while if(lcp_idx > lit.top()->lcp) { new_itv = new lcp_interval(lcp_idx, tmp_lb,0, lastInterval); lit.push(new_itv); new_itv = 0; lastInterval = 0; } // Handle the 0-indices. 
// 0-indices := { i | LCP[i]=0, \forall i = 0,...,n-1} if((idx < size) && (lcp_idx == 0)) { // svnvish: BUGBUG // ec = childtab.Set_NextlIndex(prev_0_index,k); childtab[prev_0idx] = idx; prev_0idx = idx; // Handle first 0-index specially // Store in childtab[(size-1)+1].up if(first){ // svnvish: BUGBUG // ec = childtab.Set_Up(size,k); CHECKERROR(ec); first0idx = idx; first = false; } } } // for childtab[size-1] = first0idx; // svnvish: All remaining elements in the stack are ignored. // chteo: Free all remaining elements in the stack. while(!lit.empty()) { lastInterval = lit.top(); delete lastInterval; lit.pop(); } assert(lit.empty()); return NOERROR; } #ifdef SLINK /** * Get suffix link interval, [sl_i..sl_j], of a given interval, [i..j]. * * \param i - (IN) Left bound of interval [i..j] * \param j - (IN) Right bound of interval [i..j] * \param sl_i - (OUT) Left bound of suffix link interval [sl_i..sl_j] * \param sl_j - (OUT) Right bound of suffix link interval [sl_i..sl_j] */ ErrorCode ESA::GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j) { //' Input validation assert(i=0 && j= (j-i)); return NOERROR; } #elif defined(LSEARCH) /** * "Linear" Search version of GetSuflink. Suffix link intervals are not stored * explicitly but searched when needed. * * Note: Slow!!! especially in the case of long and similar texts. */ ErrorCode ESA::GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j) { //' Variables SYMBOL ch; UInt32 lcp=0; UInt32 final_lcp = 0; UInt32 lb = 0, rb = size-1; //' root interval //' First suflink interval char := Second char of original interval ch = text[suftab[i]+1]; //' lcp of suffix link interval := lcp of original interval - 1 final_lcp = 0; GetLcp(i,j,final_lcp); final_lcp = (final_lcp > 0) ? final_lcp-1 : 0; //' Searching for suffix link interval sl_i = lb; sl_j = rb; while(lcp < final_lcp) { GetIntervalByChar(lb,rb,ch,lcp,sl_i, sl_j); GetLcp(sl_i, sl_j, lcp); lb = sl_i; rb = sl_j; ch = text[suftab[i]+lcp+1]; } assert(sl_j > sl_i); assert((sl_j-sl_i) >= (j-i)); return NOERROR; } #else /** * Construct bucket table. * * \param alpahabet_size - Size of alphabet set */ ErrorCode ESA::ConstructBcktab(const UInt32 &alphabet_size) { UInt32 MAX_DEPTH = 8; //' when alphabet_size is 256 UInt32 sizeof_uint4 = 4; //' 4 bytes integer UInt32 sizeof_uint8 = 8; //' 8 bytes integer UInt32 sizeof_key = sizeof_uint8; //' Step 1: Determine the bcktab_depth for(bcktab_depth = MAX_DEPTH; bcktab_depth >0; bcktab_depth--) { bcktab_size = 0; for(UInt32 i=0; i < size; i++) if(lcptab[i] < bcktab_depth) bcktab_size++; if(bcktab_depth <= 4) sizeof_key = sizeof_uint4; if(bcktab_size <= size/(sizeof_key + sizeof_uint4)) break; } //' Step 2: Allocate memory for bcktab_key and bcktab_val. //' Step 3: Precompute coefficients for computing hash values of prefixes later. //' Step 4: Collect the prefixes with lcp <= bcktab_depth and //' convert them into hash value. 
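  //' The key of a prefix t[0..d-1] is its base-alphabet_size value,
  //' sum_j t[j]*alphabet_size^(d-1-j), so numeric order of the keys
  //' equals lexicographic order of the prefixes and the table can be
  //' probed with std::lower_bound/std::upper_bound in GetSuflink().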
if(sizeof_key == sizeof_uint4) { //' (2) bcktab_key4 = new UInt32[bcktab_size]; bcktab_val = new UInt32[bcktab_size]; assert(bcktab_key4 && bcktab_val); //' (3) coef4 = new UInt32[4]; coef4[0] = 1; for(UInt32 i=1; i < 4; i++) coef4[i] = coef4[i-1]*alphabet_size; //' (4) for(UInt32 i=0, k=0; i < size; i++) { if(lcptab[i] < bcktab_depth) { UInt32 c = MIN((size-suftab[i]), bcktab_depth); hash_value4 = 0; for(UInt32 j=0; j < c; j++) hash_value4 += text[suftab[i]+j]*coef4[bcktab_depth-1-j]; bcktab_key4[k] = hash_value4; bcktab_val[k] = i; k++; } } } else { //' (2) bcktab_key8 = new UInt64[bcktab_size]; bcktab_val = new UInt32[bcktab_size]; assert(bcktab_key8 && bcktab_val); //' (3) coef8 = new UInt64[9]; coef8[0] = 1; for(UInt32 i=1; i < 9; i++) coef8[i] = coef8[i-1]*alphabet_size; //' (4) for(UInt32 i=0, k=0; i < size; i++) { if(lcptab[i] < bcktab_depth) { UInt32 c = MIN( (size-suftab[i]), bcktab_depth); hash_value8 = 0; for(UInt32 j=0; j < c; j++) hash_value8 += text[suftab[i]+j]*coef8[bcktab_depth-1-j]; bcktab_key8[k] = hash_value8; bcktab_val[k] = i; k++; } } } //' check if bcktab in ascending order // for(UInt32 ii=1; ii= 1); //' the interval [i..j] must has at least 2 suffixes. //' Variables UInt32 left=0, mid=0, right=0, tmp_right=0; UInt32 llcp=0, mlcp=0, rlcp=0; UInt32 orig_lcp = 0; UInt32 c = 0; UInt32 offset = 0; GetLcp(i, j, orig_lcp); if(orig_lcp <= 1) { sl_i = 0; sl_j = size-1; return NOERROR; } //' Default left = 0; right = size-1; //' Make use of bcktab here. Maximum lcp value is always 1 less than bcktab_depth. //' This is because including lcp values equal to bcktab_depth will violate //' the constraint of prefix uniqueness. offset = MIN(orig_lcp-1, bcktab_depth); assert(offset>=0); if(bcktab_key4) { hash_value4 = 0; for(UInt32 cnt=0; cnt < offset; cnt++) hash_value4 += coef4[bcktab_depth-1-cnt]*text[suftab[i]+1+cnt]; //' lower bound return the exact position of of target, if found one UInt32 *p = std::lower_bound(bcktab_key4, bcktab_key4+bcktab_size, hash_value4); left = bcktab_val[p - bcktab_key4]; //' this hash value is used to find the right bound of target interval hash_value4 += coef4[bcktab_depth-offset]; //' upper bound return the smallest value > than target. UInt32 *q = std::upper_bound(p, bcktab_key4+bcktab_size, hash_value4); if(q == bcktab_key4+bcktab_size) right = size-1; else right = bcktab_val[q - bcktab_key4] - 1; } else if(bcktab_key8) { hash_value8 = 0; for(UInt32 cnt=0; cnt < offset; cnt++) hash_value8 += coef8[bcktab_depth-1-cnt]*text[suftab[i]+1+cnt]; //' lower bound return the exact position of of target, if found one UInt64 *p = std::lower_bound(bcktab_key8, bcktab_key8+bcktab_size, hash_value8); left = bcktab_val[p - bcktab_key8]; //' this hash value is used to find the right bound of target interval hash_value8 += coef8[bcktab_depth-offset]; //' upper bound return the smallest value > than target. UInt64 *q = std::upper_bound(p, bcktab_key8+bcktab_size, hash_value8); if(q == bcktab_key8+bcktab_size) right = size-1; else right = bcktab_val[q - bcktab_key8] - 1; } tmp_right = right; assert(right <= size-1); assert(right > left); offset = 0; //' Compute LEFT boundary of suflink interval Compare(left, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, llcp); llcp += offset; if(llcp < orig_lcp-1) { Compare(right, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, rlcp); rlcp += offset; c = MIN(llcp,rlcp); while(right-left > 1){ mid = (left + right)/2; Compare(mid, c, &text[suftab[i]+1+c], orig_lcp-1-c, mlcp); mlcp += c; //' if target not found yet... 
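  //' (This is an lcp-accelerated binary search: llcp/rlcp record how
  //' many leading characters the left/right boundaries already share
  //' with the target, so Compare() can resume matching at position
  //' c = MIN(llcp, rlcp) rather than from scratch.)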
if(mlcp < orig_lcp-1) { if(text[suftab[mid]+mlcp] < text[suftab[i]+mlcp+1]) { left = mid; llcp = mlcp; } else { right = mid; rlcp = mlcp; } } else { //' mlcp == orig_lcp-1 assert(mlcp == orig_lcp-1); //' target found, but want to make sure it is the LEFTmost... right = mid; rlcp = mlcp; } c = MIN(llcp, rlcp); } sl_i = right; llcp = rlcp; } else { sl_i = left; } //' Compute RIGHT boundary of suflink interval right = tmp_right; left = sl_i; Compare(right, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, rlcp); rlcp += offset; if(rlcp < orig_lcp-1) { c = MIN(llcp,rlcp); while(right-left > 1){ mid = (left + right)/2; Compare(mid, c, &text[suftab[i]+1+c], orig_lcp-1-c, mlcp); mlcp += c; //' if target not found yet... if(mlcp < orig_lcp-1) { if(text[suftab[mid]+mlcp] < text[suftab[i]+mlcp+1]) { //' target is on the right half left = mid; llcp = mlcp; } else { //' target is on the left half right = mid; rlcp = mlcp; } } else { //' mlcp == orig_lcp-1 assert(mlcp == orig_lcp-1); //' target found, but want to make sure it is the RIGHTmost... left = mid; llcp = mlcp; } c = MIN(llcp, rlcp); } sl_j = left; } else { sl_j = right; } assert(sl_i < sl_j); return NOERROR; } #endif /** * Find suffix link interval, [p..q], for a child interval, [c_i..c_j], given its * parent interval [p_i..p_j]. * * Pre : 1. Suffix link interval for parent interval has been computed. * 2. [child_i..child_j] is not a singleton interval. * * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param child_i - (IN) Left bound of child interval. * \param child_j - (IN) Right bound of child interval. * \param sl_i - (OUT) Left bound of suffix link interval of child interval * \param sl_j - (OUT) Right bound of suffix link interval of child interval */ ErrorCode ESA::FindSuflink(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &child_i, const UInt32 &child_j, UInt32 &sl_i, UInt32 &sl_j) { assert(child_i != child_j); //' Variables SYMBOL ch; UInt32 tmp_i = 0; UInt32 tmp_j = 0; UInt32 lcp_child = 0; UInt32 lcp_parent = 0; UInt32 lcp_sl = 0; //' Step 1: Get suffix link interval of parent interval and its lcp value. //' 2: Get lcp values of parent and child intervals. //' Shortcut! if(parent_i ==0 && parent_j == size-1) { //' this is root interval //' (1) sl_i = 0; sl_j = size-1; lcp_sl = 0; //' (2) lcp_parent = 0; GetLcp(child_i,child_j,lcp_child); assert(lcp_child > 0); } else { //' (1) GetSuflink(parent_i,parent_j,sl_i,sl_j); GetLcp(sl_i, sl_j, lcp_sl); //' (2) GetLcp(parent_i,parent_j,lcp_parent); GetLcp(child_i,child_j,lcp_child); assert(lcp_child > 0); } //' Traversing down the subtree of [sl_i..sl_j] and looking for //' the suffix link interval of child interval. while (lcp_sl < lcp_child-1) { //' The character that we want to look for in suflink interval. ch = text[suftab[child_i]+lcp_sl+1]; tmp_i = sl_i; tmp_j = sl_j; GetIntervalByChar(tmp_i, tmp_j, ch, lcp_sl, sl_i, sl_j); assert(sl_i > q; //' The interval queue std::pair interval; //' Step 0: Push root onto queue. And define itself as its suflink. q.push(std::make_pair((unsigned int)0,size-1)); UInt32 idx = 0; childtab.l_idx(0,size-1,idx); suflink[idx+idx] = 0; //' left bound of suffix link interval suflink[idx+idx+1] = size-1; //' right bound of suffix link interval //' Step 1: Breadth first traversal. while (!q.empty()) { //' Step 1.1: Pop interval from queue. 
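  //' (FIFO order makes this a breadth-first traversal of the
  //' lcp-interval tree, so a parent's suffix link is always stored
  //' before its children are examined -- exactly the precondition
  //' FindSuflink() relies on.)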
interval = q.front(); q.pop(); //' Step 1.2: For each non-singleton child-intervals, [p..q], "find" its //' suffix link interval and then "push" it onto the interval queue. UInt32 i=0,j=0, sl_i=0, sl_j=0, start_idx=interval.first; do { //' Notes: interval.first := left bound of suffix link interval //' interval.second := right bound of suffix link interval assert(interval.first>=0 && interval.second < size); GetIntervalByIndex(interval.first, interval.second, start_idx, i, j); if(j > i) { //' [i..j] is non-singleton interval FindSuflink(interval.first, interval.second, i,j, sl_i, sl_j); assert(!(sl_i == i && sl_j == j)); //' Store suflink of [i..j] UInt32 idx=0; childtab.l_idx(i, j, idx); suflink[idx+idx] = sl_i; suflink[idx+idx+1] = sl_j; //' Push suflink interval onto queue q.push(std::make_pair(i,j)); } start_idx = j+1; //' prepare to get next child interval }while(start_idx < interval.second); } return NOERROR; } /** * Get all child-intervals, including singletons. * Store all non-singleton intervals onto #q#, where interval is defined as * (i,j) where i and j are left and right bounds. * * \param lb - (IN) Left bound of current interval. * \param rb - (IN) Right bound of current interval. * \param q - (OUT) Storage for intervals. */ ErrorCode ESA::GetChildIntervals(const UInt32 &lb, const UInt32 &rb, std::vector > &q) { //' Variables UInt32 k=0; //' general index UInt32 i=0,j=0; //' for interval [i..j] //' Input validation assert(rb-lb >= 1); k = lb; do { assert(lb>=0 && rb 0) { if(j > i) { // chteo: saved 1 operation ;) [260906] //' Non-singleton interval q.push_back(std::make_pair(i,j)); } k = j+1; }while(k < rb); return NOERROR; } /** * Given an l-interval, l-[i..j] and a starting index, idx \in [i..j], * return the child-interval, k-[p..q], of l-[i..j] where p == idx. * * Reference: Abo05::algorithm 4.6.4 * * Pre: #start_idx# is a l-index or equal to parent_i. * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param start_idx - (IN) Predefined left bound of child interval. * \param child_i - (OUT) Left bound of child interval. * \param child_j - (OUT) Right bound of child interval. * * Time complexity: O(|alphabet set|) */ ErrorCode ESA::GetIntervalByIndex(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &start_idx, UInt32 &child_i, UInt32 &child_j) { //' Variables UInt32 lcp_child_i = 0; UInt32 lcp_child_j = 0; //' Input validation assert( (parent_i < parent_j) && (parent_i >= 0) && (parent_j < size) && (start_idx >= parent_i) && (start_idx < parent_j)); child_i = start_idx; //' #start_idx# is not and l-index, i.e. #start_idx == #parent_i# if(child_i == parent_i) { childtab.l_idx(parent_i,parent_j,child_j); child_j--; return NOERROR; } //' #start_idx# is a l-index // svnvish:BUGBUG child_j = childtab[child_i]; lcp_child_i = lcptab[child_i]; lcp_child_j = lcptab[child_j]; if(child_i < child_j && lcp_child_i == lcp_child_j) child_j--; else { //' child_i is the left bound of last child interval child_j = parent_j; } return NOERROR; } /** * Given an l-interval, l-[i..j] and a starting character, ch \in alphabet set, * return the child-interval, k-[p..q], of l-[i..j] such that text[sa[p]+depth] == ch. * * Reference: Abo05::algorithm 4.6.4 * * Post: Return [i..j]. If interval was found, i<=j, otherwise, i>j. * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param ch - (IN) Starting character of left bound (suffix) of child interval. 
* \param depth - (IN) The position where #ch# is located in #text# * i.e. ch = text[suftab[parent_i]+depth]. * \param child_i - (OUT) Left bound of child interval. * \param child_j - (OUT) Right bound of child interval. * * Time complexity: O(|alphabet set|) */ ErrorCode ESA::GetIntervalByChar(const UInt32 &parent_i, const UInt32 &parent_j, const SYMBOL &ch, const UInt32 &depth, UInt32 &child_i, UInt32 &child_j) { //' Input validation assert(parent_i < parent_j && parent_i >= 0 && parent_j < size); //' Variables UInt32 idx = 0; UInt32 idx_next = 0; UInt32 lcp_idx = 0; UInt32 lcp_idx_next = 0; UInt32 lcp = 0; //' #depth# is actually equal to the following statement! //ec = GetLcp(parent_i, parent_j, lcp); CHECKERROR(ec); lcp = depth; //' Step 1: Check if #ch# falls in the initial range. if(text[suftab[parent_i]+lcp] > ch || text[suftab[parent_j]+lcp] < ch) { //' No child interval starts with #ch#, so, return undefined interval. child_i = 1; child_j = 0; return NOERROR; } //' Step 2: #ch# is in the initial range, but not necessarily exists in the range. //' Step 2.1: Get first l-index childtab.l_idx(parent_i, parent_j, idx); assert(idx > parent_i && idx <= parent_j); if(text[suftab[idx-1]+lcp] == ch) { child_i = parent_i; child_j = idx-1; return NOERROR; } //' Step 3: Look for child interval which starts with #ch# // svnvish: BUGBUG //ec = childtab.NextlIndex(idx, idx_next); CHECKERROR(ec); idx_next = childtab[idx]; lcp_idx = lcptab[idx]; lcp_idx_next = lcptab[idx_next]; while(idx < idx_next && lcp_idx == lcp_idx_next && text[suftab[idx]+lcp] < ch) { idx = idx_next; // svnvish: BUGBUG // ec = childtab.NextlIndex(idx, idx_next); CHECKERROR(ec); idx_next = childtab[idx]; lcp_idx = lcptab[idx]; lcp_idx_next = lcptab[idx_next]; } if(text[suftab[idx]+lcp] == ch) { child_i = idx; if(idx < idx_next && lcp_idx == lcp_idx_next) child_j = idx_next - 1; else child_j = parent_j; return NOERROR; } //' Child interval starts with #ch# not found child_i = 1; child_j = 0; return NOERROR; } /** * Return the lcp value of a given interval, l-[i..j]. * * Pre: [i..j] \subseteq [0..size] * * \param i - (IN) Left bound of the interval. * \param j - (IN) Right bound of the interval. * \param val - (OUT) The lcp value of the interval. */ ErrorCode ESA::GetLcp(const UInt32 &i, const UInt32 &j, UInt32 &val) { //' Input validation assert(i < j && i >= 0 && j < size); //' Variables UInt32 up, down; //' 0-[0..size-1]. This is a shortcut! if(i == 0 && j == size) { val = 0; } else { childtab.up(j+1,up); if( (i < up) && (up <= j)) { val = lcptab[up]; } else { childtab.down(i,down); val = lcptab[down]; } } return NOERROR; } /** * Compare #pattern# string to text[suftab[#idx#]..size] and return the * length of the substring matched. * * \param idx - (IN) The index of esa. * \param depth - (IN) The start position of matching mechanism. * \param pattern - (IN) The pattern string. * \param p_len - (IN) The length of #pattern#. * \param matched_len - (OUT) The length of matched substring. */ ErrorCode ESA::Compare(const UInt32 &idx, const UInt32 &depth, SYMBOL *pattern, const UInt32 &p_len, UInt32 &matched_len) { //' Variables UInt32 min=0; min = (p_len < size-(suftab[idx]+depth)) ? p_len : size-(suftab[idx]+depth); matched_len = 0; for(UInt32 k=0; k < min; k++) { if(text[suftab[idx]+depth+k] == pattern[k]) matched_len++; else break; } return NOERROR; } /** * Find the longest matching of text and pattern. 
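 * The "floor" interval is the last proper lcp-interval visited on the
 * way down, i.e. the deepest interval whose lcp is still below the
 * matched length; its lcp (floor_len) is the value the weight
 * functions consume as gamma (cf. VisSmo02).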
* * Note: undefinded interval := [i..j] where i>j * * Post: Return "floor" and "ceil" of longest substring of pattern that exists in text. * Otherwise, that is, no substring of pattern ever exists in text, * return the starting interval, [i..j]. * * \param i - (IN) Left bound of the starting interval. * \param j - (IN) Right bound of the starting interval. * \param offset - (IN) The number of characters between the head of suffix and the * position to start matching. * \param pattern - (IN) The pattern string to match to esa. * \param p_len - (IN) The length of #pattern# * \param lb - (OUT) The left bound of the interval containing * longest matched suffix. * \param rb - (OUT) The right bound of the interval containing * longest matched suffix. * \param matched_len - (OUT) The length of the longest matched suffix. * \param floor_lb - (OUT) Left bound of floor interval of [lb..rb]. * \param floor_rb - (OUT) Right bound of floor interval of [lb..rb]. * \param floor_len - (OUT) The lcp value of floor interval. */ ErrorCode ESA::ExactSuffixMatch(const UInt32 &i, const UInt32 &j, const UInt32 &offset, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len, UInt32 &floor_lb, UInt32 &floor_rb, UInt32 &floor_len) { //' Input validation assert(i != j); //' Variables UInt32 min, lcp; bool queryFound = true; //' Initial setting. floor_lb = lb = i; floor_rb = rb = j; matched_len = offset; //' Step 1: Get lcp of floor/starting interval. GetLcp(floor_lb, floor_rb, lcp); floor_len = lcp; //' Step 2: Skipping #offset# characters while(lcp < matched_len) { floor_lb = lb; floor_rb = rb; floor_len = lcp; GetIntervalByChar(floor_lb, floor_rb, pattern[lcp], lcp, lb, rb); // printf("lb, rb : %i, %i\n", lb, rb); assert(lb <= rb); if(lb == rb) break; GetLcp(lb, rb, lcp); } //' Step 3: Continue matching from the point (either an interval or singleton) we stopped. while( (lb<=rb) && queryFound ) { if(lb != rb) { GetLcp(lb, rb, lcp); min = (lcp < p_len) ? lcp : p_len; while(matched_len < min) { queryFound = (text[suftab[lb]+matched_len] == pattern[matched_len]); if(queryFound) matched_len++; else return NOERROR; } assert(matched_len == min); //' Full pattern found! if(matched_len == p_len) return NOERROR; floor_lb = lb; floor_rb = rb; floor_len = lcp; GetIntervalByChar(floor_lb, floor_rb,pattern[matched_len],matched_len,lb,rb); }else { //' lb == rb, i.e. singleton interval. min = (p_len < size-suftab[lb]) ? p_len : size-suftab[lb]; while(matched_len rb) { lb = floor_lb; rb = floor_rb; } return NOERROR; } #endif kernlab/src/esa.h0000644000176000001440000001062112651720731013434 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ESA.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef ESA_H #define ESA_H #include "datatype.h" #include "errorcode.h" #include "lcp.h" #include "ctable.h" #include "ilcpfactory.h" #include "isafactory.h" #include #include //#define SLINK // #define SSARRAY // does not yeet work correctly, CW class ESA { private: int _verb; public: UInt32 size; //' The length of #text# SYMBOL *text; //' Text corresponds to SA #ifdef SSARRAY int *suftab; //' Suffix Array #else UInt32 *suftab; //' Suffix Array #endif LCP lcptab; //' LCP array ChildTable childtab; //' Child table (fields merged) UInt32 *suflink; //' Suffix link table. Two fields: l,r //' --- for bucket table --- UInt32 bcktab_depth; //' Number of char defining each bucket UInt32 bcktab_size; //' size of bucket table UInt32 *bcktab_val; //' value column of bucket table UInt32 *bcktab_key4; //' 4-bytes key column of Bucket table UInt32 *coef4; UInt32 hash_value4; UInt64 *bcktab_key8; //' 8-bytes key column of Bucket table UInt64 *coef8; UInt64 hash_value8; //' --- /// Constructors ESA(const UInt32 & size_, SYMBOL *text_, int verb=INFO); /// Destructor virtual ~ESA(); /// Construct child table ErrorCode ConstructChildTable(); /// Get suffix link interval ErrorCode GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j); /// Find the suffix link ErrorCode FindSuflink(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &child_i, const UInt32 &child_j, UInt32 &sl_i, UInt32 &sl_j); /// Construct suffix link table ErrorCode ConstructSuflink(); /// Construct bucket table ErrorCode ConstructBcktab(const UInt32 &alphabet_size=256); /// Get all non-singleton child-intervals ErrorCode GetChildIntervals(const UInt32 &lb, const UInt32 &rb, std::vector > &q); /// Get intervals by index ErrorCode GetIntervalByIndex(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &start_idx, UInt32 &child_i, UInt32 &child_j); /// Get intervals by character ErrorCode GetIntervalByChar(const UInt32 &parent_i, const UInt32 &parent_j, const SYMBOL &start_ch, const UInt32 &depth, UInt32 &child_i, UInt32 &child_j); /// Get lcp value ErrorCode GetLcp(const UInt32 &i, const UInt32 &j, UInt32 &val); /// Compare pattern to text[suftab[idx]..length]. ErrorCode Compare(const UInt32 &idx, const UInt32 &depth, SYMBOL *pattern, const UInt32 &p_len, UInt32 &matched_len); /// Find longest substring of pattern in enhanced suffix array. ErrorCode Match(const UInt32 &i, const UInt32 &j, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len); /// Similar to Match() but returns also floor interval of [lb..rb] ErrorCode ExactSuffixMatch(const UInt32 &i, const UInt32 &j, const UInt32 &offset, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len, UInt32 &floor_lb, UInt32 &floor_rb, UInt32 &floor_len); }; #endif kernlab/src/cweight.cpp0000644000176000001440000000427412651720731014660 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ConstantWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 12 Oct 2006 #ifndef CWEIGHT_CPP #define CWEIGHT_CPP #include "cweight.h" #include /** * Constant weight function. Computes number of common substrings. Every * matched substring is of same weight (i.e. 1) * W(y,t) := tau - gamma * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode ConstantWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. weight = (x_len - floor_len); // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " weight : " << weight << std::endl; return NOERROR; } #endif kernlab/src/errorcode.h0000644000176000001440000000374312651720731014657 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ErrorCode.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef _ERRORCODE_H_ #define _ERRORCODE_H_ #include "datatype.h" #include // Verbosity level enum verbosity {QUIET, INFO, DEBUG1}; #define ErrorCode UInt32 /** * for general use */ #define NOERROR 0 #define GENERAL_ERROR 1 #define MEM_ALLOC_FAILED 2 #define INVALID_PARAM 3 #define ARRAY_EMPTY 4 #define OPERATION_FAILED 5 /** * SuffixArray */ #define MATCH_NOT_FOUND 101 #define PARTIAL_MATCH 102 /** * LCP */ #define LCP_COMPACT_FAILED 201 #define CHECKERROR(i) { \ if((i) != NOERROR) { \ exit(EXIT_FAILURE); \ } \ } // #define MESSAGE(msg) { std::cout<<(msg)< #include #include extern double mymin(double, double); extern double mymax(double, double); extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *);*/ /*extern int daxpy_(int *, double *, double *, int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern void dbreakpt(int, double *, double *, double *, double *, int *, double *, double *); extern void dgpstep(int, double *, double *, double *, double, double *, double *); void dprsrch(int n, double *x, double *xl, double *xu, double *A, double *g, double *w) { /* c ********** c c Subroutine dprsrch c c This subroutine uses a projected search to compute a step c that satisfies a sufficient decrease condition for the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c where A is a symmetric matrix and g is a vector. Given the c parameter alpha, the step is c c s[alpha] = P[x + alpha*w] - x, c c where w is the search direction and P the projection onto the c n-dimensional interval [xl,xu]. The final step s = s[alpha] c satisfies the sufficient decrease condition c c q(s) <= mu_0*(g'*s), c c where mu_0 is a constant in (0,1). c c The search direction w must be a descent direction for the c quadratic q at x such that the quadratic is decreasing c in the ray x + alpha*w for 0 <= alpha <= 1. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is set to the final point P[x + alpha*w]. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the vector g. c On exit g is unchanged. c c w is a double prevision array of dimension n. c On entry w specifies the search direction. c On exit w is the step s[alpha]. c c ********** */ double one = 1, zero = 0; /* Constant that defines sufficient decrease. */ /* Interpolation factor. 
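     mu0 is the sufficient-decrease constant and interpf the
     backtracking factor: alpha is repeatedly scaled by interpf until
     q(s[alpha]) <= mu0 * g'*s[alpha] holds or alpha falls below the
     smallest breakpoint brptmin.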
*/ double mu0 = 0.01, interpf = 0.5; double *wa1 = (double *) xmalloc(sizeof(double)*n); double *wa2 = (double *) xmalloc(sizeof(double)*n); /* Set the initial alpha = 1 because the quadratic function is decreasing in the ray x + alpha*w for 0 <= alpha <= 1 */ double alpha = 1, brptmin, brptmax, gts, q; int search = 1, nbrpt, nsteps = 0, i, inc = 1; /* Find the smallest break-point on the ray x + alpha*w. */ dbreakpt(n, x, xl, xu, w, &nbrpt, &brptmin, &brptmax); /* Reduce alpha until the sufficient decrease condition is satisfied or x + alpha*w is feasible. */ while (search && alpha > brptmin) { /* Calculate P[x + alpha*w] - x and check the sufficient decrease condition. */ nsteps++; dgpstep(n, x, xl, xu, alpha, w, wa1); F77_CALL(dsymv)("U", &n, &one, A, &n, wa1, &inc, &zero, wa2, &inc); gts = F77_CALL(ddot)(&n, g, &inc, wa1, &inc); q = 0.5*F77_CALL(ddot)(&n, wa1, &inc, wa2, &inc) + gts; if (q <= mu0*gts) search = 0; else /* This is a crude interpolation procedure that will be replaced in future versions of the code. */ alpha *= interpf; } /* Force at least one more constraint to be added to the active set if alpha < brptmin and the full step is not successful. There is sufficient decrease because the quadratic function is decreasing in the ray x + alpha*w for 0 <= alpha <= 1. */ if (alpha < 1 && alpha < brptmin) alpha = brptmin; /* Compute the final iterate and step. */ dgpstep(n, x, xl, xu, alpha, w, wa1); F77_CALL(daxpy)(&n, &alpha, w, &inc, x, &inc); for (i=0;i * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/KSpectrumWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef KSPECTRUMWEIGHT_CPP #define KSPECTRUMWEIGHT_CPP #include "kspectrumweight.h" #include /** * K-spectrum weight function. Compute number of common (exactly) k character substring. * * \param floor_len - (IN) Length of floor interval of matched substring. (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. (cf. tau in VisSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode KSpectrumWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. weight = 0.0; if(floor_len < k && x_len >= k) weight = 1.0; // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " weight : " << weight << std::endl; return NOERROR; } #endif //' Question: Why return only 0 or 1? //' Answer : In k-spectrum method, any length of matched substring other than k //' does not play a significant role in the string kernel. So, returning 1 //' means that the substring weight equals to # of suffix in the current interval. //' When 0 is returned, it means that substring weight equals to the floor //' interval entry in val[]. (See the definition of substring weight in //' StringKernel.cpp) //' Question: Why is the following a correct implementation of k-spectrum ? //' Answer : [Val precomputation phase] Every Interval with lcp < k has val := 0. //' For intervals with (lcp==k) or (lcp>k but floor_lcp= k but floor interval //' has val := 0 (floor_lcp < k). Hence, returning weight:=1 will make substring //' weight equals to the size of the immediate ceil interval (# of substring in common). 
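//' For reference, a minimal self-contained sketch (kept out of the
//' build by a guard macro that is never defined; the macro and the
//' function name are illustrative only) of the quantity the machinery
//' above computes in O(|x| + |y|): the naive O(|x| * |y| * k) count of
//' matching k-length substring pairs between two strings.
#ifdef KSPECTRUM_NAIVE_SKETCH
#include <string>
static double naive_kspectrum(const std::string &x, const std::string &y,
                              unsigned int k)
{
  double count = 0.0;
  //' Compare every k-length window of x against every k-length
  //' window of y and count the exact matches.
  for (std::string::size_type i = 0; i + k <= x.size(); i++)
    for (std::string::size_type j = 0; j + k <= y.size(); j++)
      if (x.compare(i, k, y, j, k) == 0)
        count += 1.0;
  //' e.g. naive_kspectrum("ababc", "abc", 2) == 3
  //' ("ab" matches twice, "bc" once).
  return count;
}
#endif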
kernlab/src/svm.h0000644000176000001440000000235612651720731013477 0ustar ripleyusers#ifndef _LIBSVM_H #define _LIBSVM_H #ifdef __cplusplus extern "C" { #endif struct svm_node { int index; double value; }; struct svm_problem { int l, n; double *y; struct svm_node **x; }; enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR, C_BSVC, EPSILON_BSVR, SPOC, KBB }; /* svm_type */ enum { LINEAR, POLY, RBF, SIGMOID, R, LAPLACE, BESSEL, ANOVA, SPLINE }; /* kernel_type */ struct svm_parameter { int svm_type; int kernel_type; int degree; /* for poly */ double gamma; /* for poly/rbf/sigmoid */ double coef0; /* for poly/sigmoid */ /* these are for training only */ double cache_size; /* in MB */ double eps; /* stopping criteria */ double C; /* for C_SVC, EPSILON_SVR and NU_SVR */ int nr_weight; /* for C_SVC */ int *weight_label; /* for C_SVC */ double* weight; /* for C_SVC */ double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */ double p; /* for EPSILON_SVR */ int shrinking; /* use the shrinking heuristics */ int qpsize; double Cbegin, Cstep; /* for linear kernel */ double lim; /* for bessel kernel */ double *K; /* pointer to kernel matrix */ int m; }; struct BQP { double eps; int n; double *x, *C, *Q, *p; }; #ifdef __cplusplus } #endif #endif /* _LIBSVM_H */ kernlab/src/dprecond.c0000644000176000001440000000200012651720731014445 0ustar ripleyusers#include #include #include #include /* LAPACK */ /* extern int dpotf2_(char *, int *, double *, int *, int *); */ double dcholfact(int n, double *A, double *L) { /* if A is p.d. , A = L*L' if A is p.s.d. , A + lambda*I = L*L'; */ int indef, i; static double lambda = 1e-3/512/512; memcpy(L, A, sizeof(double)*n*n); F77_CALL(dpotf2)("L", &n, L, &n, &indef); if (indef != 0) { memcpy(L, A, sizeof(double)*n*n); for (i=0;i> 8) | (value << 8)) #define SUFFIX_SORTED 0x80000000 // flag marks suffix as sorted. #define END_OF_CHAIN 0x3ffffffe // marks the end of a chain #define SORTED_BY_ENHANCED_INDUCTION 0x3fffffff // marks suffix which will be sorted by enhanced induction sort. #ifdef SORT_16_BIT_SYMBOLS #define SYMBOL_TYPE unsigned short #else #define SYMBOL_TYPE unsigned char #endif class MSufSort { public: MSufSort(); virtual ~MSufSort(); unsigned int Sort(SYMBOL_TYPE * source, unsigned int sourceLength); unsigned int GetElapsedSortTime(); unsigned int GetMemoryUsage(); unsigned int ISA(unsigned int index); bool VerifySort(); static void ReverseAltSortOrder(SYMBOL_TYPE * data, unsigned int nBytes); private: int CompareStrings(SYMBOL_TYPE * stringA, SYMBOL_TYPE * stringB, int len); bool IsTandemRepeat2(); bool IsTandemRepeat(); void PassTandemRepeat(); bool IsSortedByInduction(); bool IsSortedByEnhancedInduction(unsigned int suffixIndex); void ProcessSuffixesSortedByInduction(); // MarkSuffixAsSorted // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. void MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex); void MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex); void MarkSuffixAsSortedByEnhancedInductionSort(unsigned int suffixIndex); // PushNewChainsOntoStack: // Moves all new suffix chains onto the stack of partially sorted // suffixes. (makes them ready for further sub sorting). void PushNewChainsOntoStack(bool originalChains = false); void PushTandemBypassesOntoStack(); // OnSortedSuffix: // Event which is invoked with each sorted suffix at the time of // its sorting. 
virtual void OnSortedSuffix(unsigned int suffixIndex); // Initialize: // Initializes this object just before sorting begins. void Initialize(); // InitialSort: // This is the first sorting pass which makes the initial suffix // chains from the given source string. Pushes these chains onto // the stack for further sorting. void InitialSort(); // Value16: // Returns the two 8 bit symbols located // at positions N and N + 1 where N = the sourceIndex. unsigned short Value16(unsigned int sourceIndex); // ProcessChain: // Sorts the suffixes of a given chain by the next two symbols of // each suffix in the chain. This creates zero or more new suffix // chains with each sorted by two more symbols than the original // chain. Then pushes these new chains onto the chain stack for // further sorting. void ProcessNextChain(); void AddToSuffixChain(unsigned int suffixIndex, unsigned short suffixChain); void AddToSuffixChain(unsigned int firstSuffixIndex, unsigned int lastSuffixIndex, unsigned short suffixChain); void ProcessSuffixesSortedByEnhancedInduction(unsigned short suffixId); void ResolveTandemRepeatsNotSortedWithInduction(); unsigned int m_sortTime; Stack m_chainMatchLengthStack; Stack m_chainCountStack; Stack m_chainHeadStack; unsigned int m_endOfSuffixChain[0x10000]; unsigned int m_startOfSuffixChain[0x10000]; // m_source: // Address of the string to sort. SYMBOL_TYPE * m_source; // m_sourceLength: // The length of the string pointed to by m_source. unsigned int m_sourceLength; unsigned int m_sourceLengthMinusOne; // m_ISA: // The address of the working space which, when the sort is // completed, will contain the inverse suffix array for the // source string. unsigned int * m_ISA; // m_nextSortedSuffixValue: unsigned int m_nextSortedSuffixValue; // unsigned int m_numSortedSuffixes; // m_newChainIds // Array containing the valid chain numbers in m_newChain array. unsigned short m_newChainIds[0x10000]; // m_numNewChains: // The number of new suffix chain ids stored in m_numChainIds. unsigned int m_numNewChains; Stack m_suffixesSortedByInduction; unsigned int m_suffixMatchLength; unsigned int m_currentSuffixIndex; // m_firstSortedPosition: // For use with enhanced induction sorting. unsigned int m_firstSortedPosition[0x10000]; unsigned int m_firstSuffixByEnhancedInductionSort[0x10000]; unsigned int m_lastSuffixByEnhancedInductionSort[0x10000]; unsigned int m_currentSuffixChainId; #ifdef SHOW_PROGRESS // ShowProgress: // Update the progress indicator. void ShowProgress(); // m_nextProgressUpdate: // Indicates when to update the progress indicator. unsigned int m_nextProgressUpdate; // m_progressUpdateIncrement: // Indicates how many suffixes should be sorted before // incrementing the progress indicator. unsigned int m_progressUpdateIncrement; #endif // members used if alternate sorting order should be applied. SYMBOL_TYPE m_forwardAltSortOrder[256]; static SYMBOL_TYPE m_reverseAltSortOrder[256]; // for tandem repeat sorting bool m_hasTandemRepeatSortedByInduction; unsigned int m_firstUnsortedTandemRepeat; unsigned int m_lastUnsortedTandemRepeat; bool m_hasEvenLengthTandemRepeats; unsigned int m_tandemRepeatDepth; unsigned int m_firstSortedTandemRepeat; unsigned int m_lastSortedTandemRepeat; unsigned int m_tandemRepeatLength; }; //inline unsigned short MSufSort::Value16(unsigned int sourceIndex) //{ // return (sourceIndex < m_sourceLengthMinusOne) ? 
*(unsigned short *)(m_source + sourceIndex) : m_source[sourceIndex]; //} // fix by Brian Ripley inline unsigned short MSufSort::Value16(unsigned int sourceIndex) { union {unsigned short u; unsigned char b[2];} u16; u16.b[0] = m_source[sourceIndex]; u16.b[1] = (sourceIndex < m_sourceLengthMinusOne) ? m_source[sourceIndex + 1] : 0; return u16.u; } inline bool MSufSort::IsSortedByInduction() { unsigned int n = m_currentSuffixIndex + m_suffixMatchLength - 1; #ifndef USE_INDUCTION_SORTING if (n < m_sourceLengthMinusOne) return false; #endif if ((m_ISA[n] & SUFFIX_SORTED) && ((m_ISA[n] & 0x3fffffff) < m_nextSortedSuffixValue)) { InductionSortObject i(0, m_ISA[n], m_currentSuffixIndex); m_suffixesSortedByInduction.Push(i); } else if ((m_ISA[n + 1] & SUFFIX_SORTED) && ((m_ISA[n + 1] & 0x3fffffff) < m_nextSortedSuffixValue)) { InductionSortObject i(1, m_ISA[n + 1], m_currentSuffixIndex); m_suffixesSortedByInduction.Push(i); } else return false; return true; } inline bool MSufSort::IsSortedByEnhancedInduction(unsigned int suffixIndex) { if (suffixIndex > 0) if (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION) return true; return false; } inline bool MSufSort::IsTandemRepeat() { #ifndef USE_TANDEM_REPEAT_SORTING return false; #else if ((!m_tandemRepeatDepth) && (m_currentSuffixIndex + m_suffixMatchLength) == (m_ISA[m_currentSuffixIndex] + 1)) return true; #ifndef SORT_16_BIT_SYMBOLS if ((!m_tandemRepeatDepth) && ((m_currentSuffixIndex + m_suffixMatchLength) == (m_ISA[m_currentSuffixIndex]))) { m_hasEvenLengthTandemRepeats = true; return false; } #endif return false; #endif } inline void MSufSort::PassTandemRepeat() { unsigned int nextIndex; unsigned int lastIndex; // unsigned int firstIndex = m_currentSuffixIndex; while ((m_currentSuffixIndex + m_suffixMatchLength) == ((nextIndex = m_ISA[m_currentSuffixIndex]) + 1)) { lastIndex = m_currentSuffixIndex; m_currentSuffixIndex = nextIndex; } if (IsSortedByInduction()) { m_hasTandemRepeatSortedByInduction = true; m_currentSuffixIndex = m_ISA[m_currentSuffixIndex]; } else { if (m_firstUnsortedTandemRepeat == END_OF_CHAIN) m_firstUnsortedTandemRepeat = m_lastUnsortedTandemRepeat = lastIndex; else m_lastUnsortedTandemRepeat = (m_ISA[m_lastUnsortedTandemRepeat] = lastIndex); } } inline void MSufSort::PushNewChainsOntoStack(bool originalChains) { // Moves all new suffix chains onto the stack of partially sorted // suffixes. (makes them ready for further sub sorting). #ifdef SORT_16_BIT_SYMBOLS unsigned int newSuffixMatchLength = m_suffixMatchLength + 1; #else unsigned int newSuffixMatchLength = m_suffixMatchLength + 2; #endif if (m_numNewChains) { if (m_hasEvenLengthTandemRepeats) { m_chainCountStack.Push(m_numNewChains - 1); m_chainMatchLengthStack.Push(newSuffixMatchLength); m_chainCountStack.Push(1); m_chainMatchLengthStack.Push(newSuffixMatchLength - 1); } else { m_chainCountStack.Push(m_numNewChains); m_chainMatchLengthStack.Push(newSuffixMatchLength); } if (m_numNewChains > 1) IntroSort(m_newChainIds, m_numNewChains); while (m_numNewChains) { unsigned short chainId = m_newChainIds[--m_numNewChains]; chainId = ENDIAN_SWAP_16(chainId); // unsigned int n = m_startOfSuffixChain[chainId]; m_chainHeadStack.Push(m_startOfSuffixChain[chainId]); m_startOfSuffixChain[chainId] = END_OF_CHAIN; m_ISA[m_endOfSuffixChain[chainId]] = END_OF_CHAIN; } } m_hasEvenLengthTandemRepeats = false; if (m_firstUnsortedTandemRepeat != END_OF_CHAIN) { // Tandem repeats with a terminating suffix that did not get // sorted via induction has occurred (at least once). 
// We have a suffix chain (headed by m_firstUnsortedTandemRepeat)
// containing, for each tandem repeat, the suffix which immediately
// preceded the terminating suffix of that repeat. We want to sort them
// relative to each other and then process the tandem repeats.
unsigned int tandemRepeatLength = m_suffixMatchLength - 1;
unsigned int numChains = m_chainHeadStack.Count();
m_chainHeadStack.Push(m_firstUnsortedTandemRepeat);
m_chainCountStack.Push(1);
m_chainMatchLengthStack.Push((m_suffixMatchLength << 1) - 1);
m_ISA[m_lastUnsortedTandemRepeat] = END_OF_CHAIN;
m_firstUnsortedTandemRepeat = END_OF_CHAIN;
m_tandemRepeatDepth = 1;
while (m_chainHeadStack.Count() > numChains)
    ProcessNextChain();
m_suffixMatchLength = tandemRepeatLength + 1;
ResolveTandemRepeatsNotSortedWithInduction();
m_tandemRepeatDepth = 0;
}
}

inline void MSufSort::AddToSuffixChain(unsigned int suffixIndex, unsigned short suffixChain)
{
    if (m_startOfSuffixChain[suffixChain] == END_OF_CHAIN)
    {
        m_endOfSuffixChain[suffixChain] = m_startOfSuffixChain[suffixChain] = suffixIndex;
        m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(suffixChain);
    }
    else
        m_endOfSuffixChain[suffixChain] = m_ISA[m_endOfSuffixChain[suffixChain]] = suffixIndex;
}

inline void MSufSort::AddToSuffixChain(unsigned int firstSuffixIndex, unsigned int lastSuffixIndex, unsigned short suffixChain)
{
    if (m_startOfSuffixChain[suffixChain] == END_OF_CHAIN)
    {
        m_startOfSuffixChain[suffixChain] = firstSuffixIndex;
        m_endOfSuffixChain[suffixChain] = lastSuffixIndex;
        m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(suffixChain);
    }
    else
    {
        m_ISA[m_endOfSuffixChain[suffixChain]] = firstSuffixIndex;
        m_endOfSuffixChain[suffixChain] = lastSuffixIndex;
    }
}

inline void MSufSort::OnSortedSuffix(unsigned int suffixIndex)
{
    // Event which is invoked with each sorted suffix at the time of
    // its sorting.
    m_numSortedSuffixes++;
    #ifdef SHOW_PROGRESS
    if (m_numSortedSuffixes >= m_nextProgressUpdate)
    {
        m_nextProgressUpdate += m_progressUpdateIncrement;
        ShowProgress();
    }
    #endif
}

#ifdef SORT_16_BIT_SYMBOLS
inline void MSufSort::MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex)
{
    // Sets the final inverse suffix array value for a given suffix.
    // Also invokes the OnSortedSuffix member function.
    if (m_tandemRepeatDepth)
    {
        // We are processing a list of suffixes which were the second to last
        // in tandem repeats that were not terminated via induction. These
        // suffixes are not actually to be marked as sorted yet. Instead,
        // they are to be linked together in sorted order.
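        //
        // A toy sketch of the linking idiom used below, assuming END_OF_CHAIN
        // is a sentinel that no real suffix index can take (isa[] stands in
        // for m_ISA and the indices are made up):
        //
        //     unsigned int isa[8];
        //     unsigned int head = END_OF_CHAIN, tail = 0;
        //     unsigned int order[3] = {5, 2, 7};      // suffixes in sorted order
        //     for (int k = 0; k < 3; k++)
        //     {
        //         if (head == END_OF_CHAIN)
        //             head = tail = order[k];
        //         else
        //             tail = (isa[tail] = order[k]);  // old tail now points to order[k]
        //     }
        //     // traversal: head (5) -> isa[5] (2) -> isa[2] (7)
        //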
if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; symbol = ENDIAN_SWAP_16(symbol); if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } } #endif } inline void MSufSort::MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { unsigned short symbol = Value16(suffixIndex); symbol = ENDIAN_SWAP_16(symbol); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } #endif } #else inline void MSufSort::MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. 
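        //
        // Context for the induction cascade further below (a reading of this
        // code, not from the original documentation): during sorting, m_ISA
        // doubles as a tag array, and a slot holding the special value
        // SORTED_BY_ENHANCED_INDUCTION marks a suffix whose final rank can be
        // assigned as soon as its right-hand neighbour is ranked, via the
        // m_firstSortedPosition[] counter for its leading 16-bit symbol pair.
        // This routine walks leftward, ranking up to two such tagged suffixes
        // immediately and queueing a third onto the per-bucket lists consumed
        // by ProcessSuffixesSortedByEnhancedInduction().
        //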
if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol2 = symbol; symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { if (m_source[suffixIndex] < m_source[suffixIndex + 1]) symbol2 = ENDIAN_SWAP_16(symbol); else symbol2 = ENDIAN_SWAP_16(symbol2); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol2] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol2] = m_lastSuffixByEnhancedInductionSort[symbol2] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol2]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol2] = suffixIndex; } } } } #endif } inline void MSufSort::MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { unsigned short symbol; if (m_source[suffixIndex] < m_source[suffixIndex + 1]) symbol = Value16(suffixIndex); else symbol = Value16(suffixIndex + 1); symbol = ENDIAN_SWAP_16(symbol); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } #endif } #endif inline void MSufSort::ProcessNextChain() { // Sorts the suffixes of a given chain by the next two symbols of // each suffix in the chain. This creates zero or more new suffix // chains with each sorted by two more symbols than the original // chain. Then pushes these new chains onto the chain stack for // further sorting. 
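//
// A worked toy example (indices assume the text "banana"; not from the
// original documentation): with m_suffixMatchLength = 1, the chain for
// leading symbol 'a' links suffixes {1, 3, 5} ("anana", "ana", "a").
// Refining by the next two symbols splits it:
//     "na" -> {1, 3}   one new chain, pushed back for further sorting
//     end  -> {5}      exhausted, handled as a short suffix
// so each pass extends the verified common prefix of a chain by two
// symbols, until chains shrink to singletons that can be marked sorted.
//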
while (--m_chainCountStack.Top() < 0)
{
    m_chainCountStack.Pop();
    m_chainMatchLengthStack.Pop();
}
m_suffixMatchLength = m_chainMatchLengthStack.Top();
m_currentSuffixIndex = m_chainHeadStack.Pop();

#ifdef USE_ENHANCED_INDUCTION_SORTING
if (m_chainMatchLengthStack.Count() == 1)
{
    // One of the original buckets from InitialSort(). This is important
    // when enhanced induction sorting is enabled.
    unsigned short chainId = Value16(m_currentSuffixIndex);
    unsigned short temp = chainId;
    chainId = ENDIAN_SWAP_16(chainId);
    while (m_currentSuffixChainId <= chainId)
        ProcessSuffixesSortedByEnhancedInduction(m_currentSuffixChainId++);
    m_nextSortedSuffixValue = m_firstSortedPosition[temp];
}
#endif

if (m_ISA[m_currentSuffixIndex] == END_OF_CHAIN)
    MarkSuffixAsSorted(m_currentSuffixIndex, m_nextSortedSuffixValue);  // only one suffix in bucket so it is sorted.
else
{
    do
    {
        if (IsTandemRepeat())
            PassTandemRepeat();
        else if ((m_currentSuffixIndex != END_OF_CHAIN) && (IsSortedByInduction()))
            m_currentSuffixIndex = m_ISA[m_currentSuffixIndex];
        else
        {
            unsigned int firstSuffixIndex = m_currentSuffixIndex;
            unsigned int lastSuffixIndex = m_currentSuffixIndex;
            unsigned short targetSymbol = Value16(m_currentSuffixIndex + m_suffixMatchLength);
            unsigned int nextSuffix;
            do
            {
                nextSuffix = m_ISA[lastSuffixIndex = m_currentSuffixIndex];
                if ((m_currentSuffixIndex = nextSuffix) == END_OF_CHAIN)
                    break;
                else if (IsTandemRepeat())
                {
                    PassTandemRepeat();
                    break;
                }
                else if (IsSortedByInduction())
                {
                    m_currentSuffixIndex = m_ISA[nextSuffix];
                    break;
                }
            } while (Value16(m_currentSuffixIndex + m_suffixMatchLength) == targetSymbol);
            AddToSuffixChain(firstSuffixIndex, lastSuffixIndex, targetSymbol);
        }
    } while (m_currentSuffixIndex != END_OF_CHAIN);

    ProcessSuffixesSortedByInduction();
    PushNewChainsOntoStack();
}
}

inline void MSufSort::ProcessSuffixesSortedByInduction()
{
    unsigned int numSuffixes = m_suffixesSortedByInduction.Count();
    if (numSuffixes)
    {
        InductionSortObject * objects = m_suffixesSortedByInduction.m_stack;
        if (numSuffixes > 1)
            IntroSort(objects, numSuffixes);

        if (m_hasTandemRepeatSortedByInduction)
        {
            // During the last pass, some suffixes which were sorted via induction
            // were also determined to be the terminal suffix in a tandem repeat.
            // So when we mark those induction-sorted suffixes as sorted, we also
            // chain together the preceding suffix in each tandem repeat (if there
            // is one).
            unsigned int firstTandemRepeatIndex = END_OF_CHAIN;
            unsigned int lastTandemRepeatIndex = END_OF_CHAIN;
            unsigned int tandemRepeatLength = m_suffixMatchLength - 1;
            m_hasTandemRepeatSortedByInduction = false;

            for (unsigned int i = 0; i < numSuffixes; i++)
            {
                unsigned int suffixIndex = (objects[i].m_sortValue[1] & 0x3fffffff);
                if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex))
                {
                    // This suffix was a terminating suffix in a tandem repeat.
                    // Add the preceding suffix in the tandem repeat to the list.
                    if (firstTandemRepeatIndex == END_OF_CHAIN)
                        firstTandemRepeatIndex = lastTandemRepeatIndex = (suffixIndex - tandemRepeatLength);
                    else
                        lastTandemRepeatIndex = (m_ISA[lastTandemRepeatIndex] = (suffixIndex - tandemRepeatLength));
                }
                MarkSuffixAsSorted(suffixIndex, m_nextSortedSuffixValue);
            }

            // Now process each suffix in the tandem repeat list, marking each as
            // sorted. Build a new list of the suffixes which precede each of them
            // in a tandem repeat, and repeat until there is no preceding tandem
            // repeat suffix for any suffix in the list.
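            //
            // A made-up trace of this peeling loop: for the text "ababab$" with
            // tandemRepeatLength = 2, suppose suffix 4 ("ab$") was sorted by
            // induction and m_ISA[2] == 4 recorded the repeat link. The pass
            // above seeds the list with suffix 2; the loop below marks 2 as
            // sorted, sees m_ISA[0] == 2 and seeds a new list with suffix 0,
            // and a second round marks 0. Each round peels one repeat period
            // off the front until no predecessor qualifies.
            //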
while (firstTandemRepeatIndex != END_OF_CHAIN) { m_ISA[lastTandemRepeatIndex] = END_OF_CHAIN; unsigned int suffixIndex = firstTandemRepeatIndex; firstTandemRepeatIndex = END_OF_CHAIN; while (suffixIndex != END_OF_CHAIN) { if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { // this suffix was a terminating suffix in a tandem repeat. // add the preceding suffix in the tandem repeat to the list. if (firstTandemRepeatIndex == END_OF_CHAIN) firstTandemRepeatIndex = lastTandemRepeatIndex = (suffixIndex - tandemRepeatLength); else lastTandemRepeatIndex = (m_ISA[lastTandemRepeatIndex] = (suffixIndex - tandemRepeatLength)); } unsigned int nextSuffix = m_ISA[suffixIndex]; MarkSuffixAsSorted(suffixIndex, m_nextSortedSuffixValue); suffixIndex = nextSuffix; } } // finished. } else { // This is the typical branch on the condition. There were no tandem repeats // encountered during the last chain that were terminated with a suffix that // was sorted via induction. In this case we just mark the suffixes as sorted // and we are done. for (unsigned int i = 0; i < numSuffixes; i++) MarkSuffixAsSorted(objects[i].m_sortValue[1] & 0x3fffffff, m_nextSortedSuffixValue); } m_suffixesSortedByInduction.Clear(); } } inline void MSufSort::ProcessSuffixesSortedByEnhancedInduction(unsigned short suffixId) { // if (m_firstSuffixByEnhancedInductionSort[suffixId] != END_OF_CHAIN) { unsigned int currentSuffixIndex = m_firstSuffixByEnhancedInductionSort[suffixId]; unsigned int lastSuffixIndex = m_lastSuffixByEnhancedInductionSort[suffixId]; m_firstSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; m_lastSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; do { unsigned short symbol = Value16(currentSuffixIndex); unsigned int nextIndex = m_ISA[currentSuffixIndex]; MarkSuffixAsSorted2(currentSuffixIndex, m_firstSortedPosition[symbol]); if (currentSuffixIndex == lastSuffixIndex) { if (m_firstSuffixByEnhancedInductionSort[suffixId] == END_OF_CHAIN) return; currentSuffixIndex = m_firstSuffixByEnhancedInductionSort[suffixId]; lastSuffixIndex = m_lastSuffixByEnhancedInductionSort[suffixId]; m_firstSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; m_lastSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; } else currentSuffixIndex = nextIndex; } while (true); } } #ifdef SHOW_PROGRESS inline void MSufSort::ShowProgress() { // Update the progress indicator. double p = ((double)(m_numSortedSuffixes & 0x3fffffff) / m_sourceLength) * 100; // printf("Progress: %.2f%% %c", p, 13); } #endif #endif kernlab/src/wkasailcp.h0000644000176000001440000000337712651720731014654 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_kasai_lcp.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef W_KASAI_LCP_H #define W_KASAI_LCP_H #include "datatype.h" #include "errorcode.h" #include "ilcpfactory.h" #include "lcp.h" /** * Kasai et al's LCP array computation algorithm is * is slightly faster than Manzini's algorithm. However, * it needs inverse suffix array which costs extra memory. */ class W_kasai_lcp : public I_LCPFactory { public: /// Constructor W_kasai_lcp(){} /// Desctructor virtual ~W_kasai_lcp(){} /// Compute LCP array. ErrorCode ComputeLCP(const SYMBOL *text, const UInt32 &len, const UInt32 *sa, LCP& lcp); }; #endif kernlab/src/stringkernel.cpp0000644000176000001440000003356512651720731015742 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/StringKernel.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 10 Aug 2006 // 11 Oct 2006 #ifndef STRINGKERNEL_CPP #define STRINGKERNEL_CPP #include #include #include #include #include #include #include #include #include "stringkernel.h" StringKernel::StringKernel(): esa(0), weigher(0), val(0), lvs(0) {} /** * Construct string kernel given constructed enhanced suffix array. * * \param esa_ - ESA instance. */ StringKernel::StringKernel(ESA *esa_, int weightfn, Real param, int verb): esa(esa_), val(new Real[esa_->size + 1]), lvs(0), _verb(verb) { switch (weightfn) { case CONSTANT: weigher = new ConstantWeight(); break; case EXPDECAY: weigher = new ExpDecayWeight(param); break; case KSPECTRUM: weigher = new KSpectrumWeight(param); break; case BOUNDRANGE: weigher = new BoundedRangeWeight(param); break; default: int nothing = 0; } } /** * Construct string kernel when given only text and its length. * * \param text - (IN) The text which SuffixArray and StringKernel correspond to. * \param text_length - (IN) The length of #_text#. * \param verb - (IN) Verbosity level. */ StringKernel::StringKernel(const UInt32 &size, SYMBOL *text, int weightfn, Real param, int verb): lvs(0), _verb(verb) { // Build ESA. esa = new ESA(size, text, verb); // Allocate memory space for #val# val = new Real[esa->size + 1]; // Instantiate weigher. 
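    // A minimal sketch of how the weigher is consumed once constructed (the
    // call pattern mirrors IterativeCompute() and Compute_K() below; the
    // numeric arguments are made up):
    //
    //     Real w = 0.0;
    //     weigher->ComputeWeight(2 /* floor_len */, 5 /* x_len */, w);
    //     // w is the weight credited to extending a match from depth 2 to
    //     // depth 5 along one edge of the (virtual) suffix tree.
    //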
switch (weightfn) { case CONSTANT: weigher = new ConstantWeight(); break; case EXPDECAY: weigher = new ExpDecayWeight(param); break; case KSPECTRUM: weigher = new KSpectrumWeight(param); break; case BOUNDRANGE: weigher = new BoundedRangeWeight(param); break; default: int nothing = 0; } } /** * StringKernel destructor. * */ StringKernel::~StringKernel() { //' Delete objects and release allocated memory space. if (esa) { delete esa; esa = 0; } if (val) { delete [] val; val = 0; } if (lvs) { delete [] lvs; lvs = 0; } if (weigher) { delete weigher; weigher = 0; } } /** * An Iterative auxiliary function used in PrecomputeVal(). * * Note: Every lcp-interval can be represented by its first l-index. * Hence, 'val' is stored in val[] at the index := first l-index. * * Pre: val[] is initialised to 0. * * @param left Left bound of current interval * @param right Right bound of current interval */ void StringKernel::IterativeCompute(const UInt32 &left, const UInt32 &right) { //std::cout << "In IterativeCompute() " << std::endl; //' Variables queue > q; vector > childlist; pair p; UInt32 lb = 0; UInt32 rb = 0; UInt32 floor_len = 0; UInt32 x_len = 0; Real cur_val = 0.0; Real edge_weight = 0.0; //' Step 1: At root, 0-[0..size-1]. Store all non-single child-intervals onto #q#. lb = left; //' Should be equal to 0. rb = right; //' Should be equal to size-1. esa->GetChildIntervals(lb, rb, childlist); for (UInt32 jj = 0; jj < childlist.size(); jj++) q.push(childlist[jj]); //' Step 2: Do breadth-first traversal. For every interval, compute val and add //' it to all its non-singleton child-intervals' val-entries in val[]. //' Start with child-interval [i..j] of 0-[0..size-1]. //' assert(j != size-1) while (!q.empty()) { //' Step 2.1: Get an interval from queue, #q#. p = q.front(); q.pop(); //' step 2.2: Get the lcp of floor interval. UInt32 a = 0, b = 0; a = esa->lcptab[p.first]; //svnvish: BUGBUG // Glorious hack. We have to remove it later. // This gives the lcp of parent interval if (p.second < esa->size - 1) { b = esa->lcptab[p.second + 1]; } else { b = 0; } floor_len = (a > b) ? a : b; //' Step 2.3: Get the lcp of current interval. esa->GetLcp(p.first, p.second, x_len); //' Step 2.4: Compute val of current interval. weigher->ComputeWeight(floor_len, x_len, edge_weight); cur_val = edge_weight * (lvs[p.second + 1] - lvs[p.first]); //' Step 2.5: Add #cur_val# to val[]. UInt32 firstlIndex1 = 0; esa->childtab.l_idx(p.first, p.second, firstlIndex1); val[firstlIndex1] += cur_val; // std::cout << "p.first:"<GetChildIntervals(p.first, p.second, childlist); //' Step 2.7: (a) Add #cur_val# to child-intervals' val-entries in val[]. //' (b) Push child-interval onto #q#. for (UInt32 kk = 0; kk < childlist.size(); kk++) { //' (a) UInt32 firstlIndex2 = 0; pair tmp_p = childlist[kk]; if (esa->text[esa->suftab[tmp_p.first]] == SENTINEL) continue; esa->childtab.l_idx(tmp_p.first, tmp_p.second, firstlIndex2); // assert( val[firstlIndex2] == 0 ); val[firstlIndex2] = val[firstlIndex1]; // cur_val; //' (b) q.push(make_pair(tmp_p.first, tmp_p.second)); } } //std::cout << "Out IterativeCompute() " << std::endl; } /** * Precomputation of val(t) of string kernel. * Observation :Every internal node of a suffix tree can be represented by at * least one index of the corresponding lcp array. So, the val * of a node is stored in val[] at the index corresponding to that of * the fist representative lcp value in lcp[]. */ void StringKernel::PrecomputeVal() { //' Memory space requirement check. 
assert(val != 0); //' Initialise all val entries to zero! memset(val, 0, sizeof(Real)*esa->size + 1); //' Start iterative precomputation of val[] IterativeCompute(0, esa->size - 1); } /** * Compute k(text,x) by performing Chang and Lawler's matching statistics collection * algorithm on the enhanced suffix array. * * \param x - (IN) The input string which is to be evaluated together with * the text in esa. * \param x_len - (IN) The length of #x#. * \param value - (IN) The value of k(x,x'). */ void StringKernel::Compute_K(SYMBOL *x, const UInt32 &x_len, Real &value) { //' Variables UInt32 floor_i = 0; UInt32 floor_j = 0; UInt32 i = 0; UInt32 j = 0; UInt32 lb = 0; UInt32 rb = 0; UInt32 matched_len = 0; UInt32 offset = 0; UInt32 floor_len = 0; UInt32 firstlIndex = 0; Real edge_weight = 0.0; //' Initialisation value = 0.0; lb = 0; rb = esa->size - 1; //' for each suffix, xprime[k..xprime_len-1], find longest match in text for (UInt32 k = 0; k < x_len; k++) { //' Step 1: Matching esa->ExactSuffixMatch(lb, rb, offset, &x[k], x_len - k, i, j, matched_len, floor_i, floor_j, floor_len); //' Step 2: Get suffix link for [floor_i..floor_j] esa->GetSuflink(floor_i, floor_j, lb, rb); assert((floor_j - floor_i) <= (rb - lb)); //' Range check //' Step 3: Compute contribution of this matched substring esa->childtab.l_idx(floor_i, floor_j, firstlIndex); assert(firstlIndex > floor_i && firstlIndex <= floor_j); assert(floor_len <= matched_len); weigher->ComputeWeight(floor_len, matched_len, edge_weight); value += val[firstlIndex] + edge_weight * (lvs[j + 1] - lvs[i]); // std::cout << "i:"<size); //' Allocate memory space for lvs[] lvs = new (nothrow) Real[esa->size + 1]; assert(lvs); //' Assign leaf weight to lvs element according to its position in text. for (UInt32 j = 0; j < esa->size; j++) { pos = esa->suftab[j]; UInt32 *p = upper_bound(clen, clen + m, pos); //' O(log n) lvs[j + 1] = leafWeight[p - clen]; } //' Compute cumulative lvs[]. To be used in matching statistics computation later. lvs[0] = 0.0; partial_sum(lvs, lvs + esa->size + 1, lvs); //chteo: [101006] delete [] clen; clen = 0; } /** * Set lvs[i] = i, for i = 0 to esa->size * Memory space for lvs[] will be allocated. */ void StringKernel::Set_Lvs() { //' Clean up previous lvs, if any. if (lvs) { delete lvs; lvs = 0; } //' Allocate memory space for lvs[] lvs = new (nothrow) Real[esa->size + 1]; //' Check if memory correctly allocated. assert(lvs != 0); //' Range := [0..esa->size] UInt32 localsize = esa->size; for (UInt32 i = 0; i <= localsize; i++) lvs[i] = i; } #endif #include #include #include extern "C" { SEXP stringtv(SEXP rtext, // text document SEXP ltext, // list or vector of text documents to compute kvalues against SEXP nltext, // number of text documents in ltext SEXP vnchar, // number of characters in text SEXP vnlchar, // characters per document in ltext SEXP stype, // type of kernel SEXP param) // parameter for kernel { // R interface for text and list of text computation. Should return a vector of computed kernel values. 
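    // Flow sketch (mirrors the code below): build the enhanced suffix array
    // and string kernel once for the reference text, assign uniform leaf
    // weights, precompute val[], then evaluate k(text, x_i) for each
    // document x_i and store the results in a REALSXP vector. A hypothetical
    // stand-alone C++ caller would take the same steps:
    //
    //     StringKernel sk(n, (SYMBOL *) text, weightfn, kparam, 0);
    //     sk.Set_Lvs();        // lvs[i] = i, i.e. uniform leaf weights
    //     sk.PrecomputeVal();  // single traversal of the lcp intervals
    //     Real k = 0.0;
    //     sk.Compute_K((SYMBOL *) doc, doc_len, k);
    //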
// Construct ESASK UInt32 text_size = *INTEGER(vnchar); int number_ltext = *INTEGER(nltext); int *ltext_size = (int *) malloc (sizeof(int) * number_ltext); memcpy(ltext_size, INTEGER(vnlchar), number_ltext*sizeof(int)); int weightfn = *INTEGER(stype); const char *text = CHAR(STRING_ELT(rtext,0)); Real kparam = *REAL(param); double kVal; SEXP alpha; PROTECT(alpha = allocVector(REALSXP, number_ltext)); // Check if stringlength reported from R is correct if(strlen(text)!= text_size) text_size= strlen(text); StringKernel sk(text_size, (SYMBOL*)text, (weightfn - 1), kparam, 0); sk.Set_Lvs(); sk.PrecomputeVal(); for (int i=0; i * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ChildTable.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef CTABLE_H #define CTABLE_H #include #include #include "datatype.h" #include "errorcode.h" #include "lcp.h" // using namespace std; /** * ChildTable represents the parent-child relationship between * the lcp-intervals of suffix array. * Reference: AboKurOhl04 */ class ChildTable : public std::vector { private: // childtab needs lcptab to differentiate between up, down, and // nextlIndex values. LCP& _lcptab; public: // Constructors ChildTable(const UInt32 &size, LCP& lcptab): std::vector(size), _lcptab(lcptab){} // Destructor virtual ~ChildTable() {} // Get first l-index of an l-[i..j] interval ErrorCode l_idx(const UInt32 &i, const UInt32 &j, UInt32 &idx); // .up field ErrorCode up(const UInt32 &idx, UInt32 &val); // .down field ErrorCode down(const UInt32 &idx, UInt32 &val); // .next field can be retrieved by accessing the array directly. friend std::ostream& operator << (std::ostream& os, const ChildTable& ct); }; #endif kernlab/src/brweight.h0000644000176000001440000000325412651720731014503 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/BoundedRangeWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef BRWEIGHT_H #define BRWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' Bounded Range weight class class BoundedRangeWeight : public I_WeightFactory { Real n; public: /// Constructor BoundedRangeWeight(const Real &n_=1): n(n_){} /// Destructor virtual ~BoundedRangeWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/isafactory.h0000644000176000001440000000306412651720731015033 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_SAFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 //' Interface for Enhanced Suffix Array construction algorithms #ifndef I_SAFACTORY_H #define I_SAFACTORY_H #include "datatype.h" #include "errorcode.h" class I_SAFactory { public: ///Constructor I_SAFactory(){} ///Destructor virtual ~I_SAFactory(){} ///Methods virtual ErrorCode ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array) = 0; }; #endif kernlab/src/inductionsort.cpp0000644000176000001440000000264612651720731016133 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #include "inductionsort.h" InductionSortObject::InductionSortObject(unsigned int inductionPosition, unsigned int inductionValue, unsigned int suffixIndex) { // sort value is 64 bits long. // bits are ... 
// 63 - 60: induction position (0 - 15) // 59 - 29: induction value at induction position (0 - (2^30 -1)) // 28 - 0: suffix index for the suffix sorted by induction (0 - (2^30) - 1) m_sortValue[0] = inductionPosition << 28; m_sortValue[0] |= ((inductionValue & 0x3fffffff) >> 2); m_sortValue[1] = (inductionValue << 30); m_sortValue[1] |= suffixIndex; } kernlab/src/lcp.cpp0000644000176000001440000001264312651720731014003 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/LCP.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef LCP_CPP #define LCP_CPP #include "lcp.h" // Threshold for compacting LCP[] const Real THRESHOLD = 0.3; LCP::LCP(const UInt32 &size): _p_array(0), _idx_array(0), _val_array(0), _size(size), _is_compact(false), _beg(0), _end(0), _cache(0), _dist(0), array(new UInt32[size]){ } LCP::~LCP() { if(array) {delete [] array; array = 0;} if(_p_array) {delete [] _p_array; _p_array = 0;} if(_idx_array) {delete [] _idx_array; _idx_array = 0;} if(_val_array) {delete [] _val_array; _val_array = 0;} } /** * Compact initial/original lcp array of n elements (i.e. 4n bytes) * into a n byte array with 8 bytes of secondary storage. * */ ErrorCode LCP::compact(void){ // Validate pre-conditions //assert(!array.empty() && array.size() == _size); assert(array); // Already compact. Nothing to do if (_is_compact) return NOERROR; // Count number of lcp-values >= 255. // UInt32 idx_len = std::count_if(array.begin(), array.end(), // std::bind2nd(std::greater(),254)); #ifdef _RWSTD_NO_CLASS_PARTIAL_SPEC UInt32 idx_len = 0; std::count_if(array, array + _size, std::bind2nd(std::greater(),254), idx_len); #else UInt32 idx_len = std::count_if(array, array + _size, std::bind2nd(std::greater(),254)); #endif // Compact iff idx_len/|array| > THRESHOLD if((Real)idx_len/_size > THRESHOLD) { //std::cout<< "Not compacting " << std::endl; return NOERROR; } // std::cout<< "Compacting with : " << idx_len << std::endl; // We know how much space to use // _p_array.resize(_size); // _idx_array.resize(idx_len); // _val_array.resize(idx_len); _p_array = new Byte1[_size]; _idx_array = new UInt32[idx_len]; _val_array = new UInt32[idx_len]; // Hold pointers for later. 
Avoids function calls // _beg = _idx_array.begin(); // _end = _idx_array.end(); // _cache = _idx_array.begin(); _beg = _idx_array; _end = _idx_array + idx_len; _cache = _idx_array; _dist = 0; for(UInt32 i=0, j=0; i<_size; i++) { if(array[i] < 255){ _p_array[i] = array[i]; }else { _p_array[i] = 255; _idx_array[j] = i; _val_array[j] = array[i]; j++; } } //array.resize(0); // array.clear(); delete [] array; array = 0; _is_compact = true; return NOERROR; } /** * Retrieve lcp array values. * * \param idx - (IN) Index of lcp array */ UInt32 LCP::operator [] (const UInt32 &idx) { // input is valid? // assert (idx >= 0 && idx < _size); if(!_is_compact){ // LCP array has not been compacted yet! return array[idx]; } if(_p_array[idx] < 255){ // Found in primary index return (UInt32) _p_array[idx]; } // svnvish: BUGBUG // Do some caching here. // // Now search in secondary index as last resort // std::pair< const_itr, const_itr > p = equal_range(_beg, _end, idx); // return _val_array[std::distance(_beg, p.first)]; if (++_cache == _end){ _cache = _beg; _dist = 0; }else{ _dist++; } UInt32 c_idx = *(_cache); if (c_idx == idx){ return _val_array[_dist]; } // _cache = std::equal_range(_beg, _end, idx).first; _cache = std::lower_bound(_beg, _end, idx); #ifdef _RWSTD_NO_CLASS_PARTIAL_SPEC _dist = 0; std::distance(_beg, _cache, _dist); #else _dist = std::distance(_beg, _cache); #endif //std::cout << "here" << std::endl; // _cache = equal_range(_beg, _end, idx).first; // _dist = std::distance(_beg, _cache); return _val_array[_dist]; // if (c_idx > idx){ // _cache = equal_range(_beg, _cache, idx).first; // }else{ // _cache = equal_range(_cache, _end, idx).first; // } // //_cache = p.first; // _dist = std::distance(_beg, _cache); // return _val_array[_dist]; } /** * Dump array elements to output stream. * * \param os - (IN) Output stream * \param lcp - (IN) LCP object. */ std::ostream& operator << (std::ostream& os, LCP& lcp){ for( UInt32 i = 0; i < lcp._size; i++ ){ os << "lcp[ " << i << "]: " << lcp[i] << std::endl; } return os; } #endif kernlab/src/wmsufsort.h0000644000176000001440000000347512651720731014746 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_msufsort.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 13 Jul 2007 : use MSufSort v3.1 instead of v2.2 // Wrapper for Michael Maniscalco's MSufSort version 3.1 algorithm #ifndef W_MSUFSORT_H #define W_MSUFSORT_H #include "datatype.h" #include "isafactory.h" #include "msufsort.h" class W_msufsort : public I_SAFactory { public: ///Variables //'Declaration of object POINTERS, no initialization needed. //'If Declaration of objects, initialize them in member initialization list. MSufSort *msuffixsorter; ///Constructor W_msufsort(); ///Destructor virtual ~W_msufsort(); ///Methods ErrorCode ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array); }; #endif kernlab/src/svm.cpp0000644000176000001440000025315612651720731014040 0ustar ripleyusers#include #include #include #include #include #include #include #include #include #include #include "svm.h" typedef float Qfloat; typedef signed char schar; #ifndef min template inline T min(T x,T y) { return (x inline T max(T x,T y) { return (x>y)?x:y; } #endif template inline void swap(T& x, T& y) { T t=x; x=y; y=t; } template inline void clone(T*& dst, S* src, int n) { dst = new T[n]; memcpy((void *)dst,(void *)src,sizeof(T)*n); } inline double powi(double base, int times) { double tmp = base, ret = 1.0; for(int t=times; t>0; t/=2) { if(t%2==1) ret*=tmp; tmp = tmp * tmp; } return ret; } #define INF HUGE_VAL # define TAU 1e-12 #define Malloc(type,n) (type *)malloc((n)*sizeof(type)) #if 0 void info(char *fmt,...) { va_list ap; va_start(ap,fmt); //vprintf(fmt,ap); va_end(ap); } void info_flush() { fflush(stdout); } #else void info(char *fmt,...) 
{} void info_flush() {} #endif // // Kernel Cache // // l is the number of total data items // size is the cache size limit in bytes // class Cache { public: Cache(int l,long int size, int qpsize); ~Cache(); // request data [0,len) // return some position p where [p,len) need to be filled // (p >= len if nothing needs to be filled) int get_data(const int index, Qfloat **data, int len); void swap_index(int i, int j); // future_option private: int l; long int size; struct head_t { head_t *prev, *next; // a cicular list Qfloat *data; int len; // data[0,len) is cached in this entry }; head_t *head; head_t lru_head; void lru_delete(head_t *h); void lru_insert(head_t *h); }; Cache::Cache(int l_,long int size_,int qpsize):l(l_),size(size_) { head = (head_t *)calloc(l,sizeof(head_t)); // initialized to 0 size /= sizeof(Qfloat); size -= l * sizeof(head_t) / sizeof(Qfloat); size = max(size, (long int) qpsize*l); // cache must be large enough for 'qpsize' columns lru_head.next = lru_head.prev = &lru_head; } Cache::~Cache() { for(head_t *h = lru_head.next; h != &lru_head; h=h->next) free(h->data); free(head); } void Cache::lru_delete(head_t *h) { // delete from current location h->prev->next = h->next; h->next->prev = h->prev; } void Cache::lru_insert(head_t *h) { // insert to last position h->next = &lru_head; h->prev = lru_head.prev; h->prev->next = h; h->next->prev = h; } int Cache::get_data(const int index, Qfloat **data, int len) { head_t *h = &head[index]; if(h->len) lru_delete(h); int more = len - h->len; if(more > 0) { // free old space while(size < more) { head_t *old = lru_head.next; lru_delete(old); free(old->data); size += old->len; old->data = 0; old->len = 0; } // allocate new space h->data = (Qfloat *)realloc(h->data,sizeof(Qfloat)*len); size -= more; swap(h->len,len); } lru_insert(h); *data = h->data; return len; } void Cache::swap_index(int i, int j) { if(i==j) return; if(head[i].len) lru_delete(&head[i]); if(head[j].len) lru_delete(&head[j]); swap(head[i].data,head[j].data); swap(head[i].len,head[j].len); if(head[i].len) lru_insert(&head[i]); if(head[j].len) lru_insert(&head[j]); if(i>j) swap(i,j); for(head_t *h = lru_head.next; h!=&lru_head; h=h->next) { if(h->len > i) { if(h->len > j) swap(h->data[i],h->data[j]); else { // give up lru_delete(h); free(h->data); size += h->len; h->data = 0; h->len = 0; } } } } // // Kernel evaluation // // the static method k_function is for doing single kernel evaluation // the constructor of Kernel prepares to calculate the l*l kernel matrix // the member function get_Q is for getting one column from the Q Matrix // class QMatrix { public: virtual Qfloat *get_Q(int column, int len) const = 0; virtual double *get_QD() const = 0; virtual void swap_index(int i, int j) const = 0; virtual ~QMatrix() {} }; class Kernel: public QMatrix{ public: Kernel(int l, svm_node * const * x, const svm_parameter& param); virtual ~Kernel(); static double k_function(const svm_node *x, const svm_node *y, const svm_parameter& param); virtual Qfloat *get_Q(int column, int len) const = 0; virtual double *get_QD() const = 0; virtual void swap_index(int i, int j) const // no so const... 
{ swap(x[i],x[j]); if(x_square) swap(x_square[i],x_square[j]); } protected: double (Kernel::*kernel_function)(int i, int j) const; private: const svm_node **x; double *x_square; // svm_parameter const int kernel_type; const int degree; const double gamma; const double coef0; const double lim; const double *K; const int m; static double dot(const svm_node *px, const svm_node *py); static double anova(const svm_node *px, const svm_node *py, const double sigma, const int degree); double kernel_linear(int i, int j) const { return dot(x[i],x[j]); } double kernel_poly(int i, int j) const { return powi(gamma*dot(x[i],x[j])+coef0,degree); } double kernel_rbf(int i, int j) const { return exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); } double kernel_sigmoid(int i, int j) const { return tanh(gamma*dot(x[i],x[j])+coef0); } double kernel_laplace(int i, int j) const { return exp(-gamma*sqrt(fabs(x_square[i]+x_square[j]-2*dot(x[i],x[j])))); } double kernel_bessel(int i, int j) const { double bkt = gamma*sqrt(fabs(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); if (bkt < 0.000001){ return 1 ; } else { return(powi(((jn((int)degree, bkt)/powi(bkt,((int)degree)))/lim),(int) coef0)); } } double kernel_anova(int i, int j) const { return anova(x[i], x[j], gamma, degree); } double kernel_spline(int i, int j) const { double result=1.0; double min; double t1,t4; const svm_node *px = x[i], *py= x[j]; // px = x[i]; // py = x[j]; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { min=((px->valuevalue)?px->value:py->value); t1 = (px->value * py->value); t4 = min*min; result*=( 1.0 + t1 + (t1*min) ) - ( ((px->value+py->value)/2.0) * t4) + ((t4 * min)/3.0); } ++px; ++py; } return result; } double kernel_R(int i, int j) const { return *(K + m*i +j); } }; Kernel::Kernel(int l, svm_node * const * x_, const svm_parameter& param) :kernel_type(param.kernel_type), degree(param.degree), gamma(param.gamma), coef0(param.coef0), lim(param.lim), K(param.K), m(param.m) { switch(kernel_type) { case LINEAR: kernel_function = &Kernel::kernel_linear; break; case POLY: kernel_function = &Kernel::kernel_poly; break; case RBF: kernel_function = &Kernel::kernel_rbf; break; case SIGMOID: kernel_function = &Kernel::kernel_sigmoid; break; case LAPLACE: kernel_function = &Kernel::kernel_laplace; break; case BESSEL: kernel_function = &Kernel::kernel_bessel; break; case ANOVA: kernel_function = &Kernel::kernel_anova; break; case SPLINE: kernel_function = &Kernel::kernel_spline; break; case R: kernel_function = &Kernel::kernel_R; break; } clone(x,x_,l); if(kernel_type == RBF || kernel_type == LAPLACE || kernel_type == BESSEL) { x_square = new double[l]; for(int i=0;iindex != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } double Kernel::anova(const svm_node *px, const svm_node *py, const double sigma, const int degree) { double sum = 0; double tv; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { tv = (px->value - py->value) * (px->value - py->value); sum += exp( - sigma * tv); ++px; ++py; } else { if(px->index > py->index) { sum += exp( - sigma * (py->value * py->value)); ++py; } else { sum += exp( - sigma * (px->value * px->value)); ++px; } } } return (powi(sum,degree)); } double Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter& param) { switch(param.kernel_type) { case LINEAR: return dot(x,y); case POLY: return 
powi(param.gamma*dot(x,y)+param.coef0,param.degree); case RBF: { double sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { double d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return exp(-param.gamma*sum); } case SIGMOID: return tanh(param.gamma*dot(x,y)+param.coef0); default: return 0; /* Unreachable */ } } // Generalized SMO+SVMlight algorithm // Solves: // // min 0.5(\alpha^T Q \alpha) + p^T \alpha // // y^T \alpha = \delta // y_i = +1 or -1 // 0 <= alpha_i <= Cp for y_i = 1 // 0 <= alpha_i <= Cn for y_i = -1 // // Given: // // Q, p, y, Cp, Cn, and an initial feasible point \alpha // l is the size of vectors and matrices // eps is the stopping criterion // // solution will be put in \alpha, objective value will be put in obj // class Solver { public: Solver() {}; virtual ~Solver() {}; struct SolutionInfo { double obj; double rho; double upper_bound_p; double upper_bound_n; double r; // for Solver_NU }; void Solve(int l, const QMatrix& Q, const double *p_, const schar *y_, double *alpha_, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking); protected: int active_size; schar *y; double *G; // gradient of objective function enum { LOWER_BOUND, UPPER_BOUND, FREE }; char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE double *alpha; const QMatrix *Q; const double *QD; double eps; double Cp,Cn; double *p; int *active_set; double *G_bar; // gradient, if we treat free variables as 0 int l; bool unshrink; // XXX double get_C(int i) { return (y[i] > 0)? Cp : Cn; } void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } bool is_free(int i) { return alpha_status[i] == FREE; } void swap_index(int i, int j); void reconstruct_gradient(); virtual int select_working_set(int &i, int &j); virtual double calculate_rho(); virtual void do_shrinking(); private: bool be_shrunk(int i, double Gmax1, double Gmax2); }; void Solver::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(p[i],p[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } void Solver::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i,j; int nr_free = 0; for(j=active_size;j 2*active_size*(l-active_size)) { for(i=active_size;iget_Q(i,active_size); for(j=0;jget_Q(i,l); double alpha_i = alpha[i]; for(j=active_size;jl = l; this->Q = &Q; QD=Q.get_QD(); clone(p, p_,l); clone(y, y_,l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; unshrink = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;iINT_MAX/100 ? 
INT_MAX : 100*l); int counter = min(l,1000)+1; while(iter < max_iter) { // show progress and do shrinking if(--counter == 0) { counter = min(l,1000); if(shrinking) do_shrinking(); } int i,j; if(select_working_set(i,j)!=0) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; if(select_working_set(i,j)!=0) break; else counter = 1; // do shrinking next iteration } ++iter; // update alpha[i] and alpha[j], handle bounds carefully const Qfloat *Q_i = Q.get_Q(i,active_size); const Qfloat *Q_j = Q.get_Q(j,active_size); double C_i = get_C(i); double C_j = get_C(j); double old_alpha_i = alpha[i]; double old_alpha_j = alpha[j]; if(y[i]!=y[j]) { double quad_coef = QD[i]+QD[j]+2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (-G[i]-G[j])/quad_coef; double diff = alpha[i] - alpha[j]; alpha[i] += delta; alpha[j] += delta; if(diff > 0) { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = diff; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = -diff; } } if(diff > C_i - C_j) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = C_i - diff; } } else { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = C_j + diff; } } } else { double quad_coef = QD[i]+QD[j]-2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (G[i]-G[j])/quad_coef; double sum = alpha[i] + alpha[j]; alpha[i] -= delta; alpha[j] += delta; if(sum > C_i) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = sum - C_i; } } else { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = sum; } } if(sum > C_j) { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = sum - C_j; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = sum; } } } // update G double delta_alpha_i = alpha[i] - old_alpha_i; double delta_alpha_j = alpha[j] - old_alpha_j; for(int k=0;k= max_iter) { if(active_size < l) { // reconstruct the whole gradient to calculate objective value reconstruct_gradient(); active_size = l; } } // calculate rho si->rho = calculate_rho(); // calculate objective value { double v = 0; int i; for(i=0;iobj = v/2; } // put back the solution { for(int i=0;iupper_bound_p = Cp; si->upper_bound_n = Cn; delete[] p; delete[] y; delete[] alpha; delete[] alpha_status; delete[] active_set; delete[] G; delete[] G_bar; } // return 1 if already optimal, return 0 otherwise int Solver::select_working_set(int &out_i, int &out_j) { // return i,j such that // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) double Gmax = -INF; double Gmax2 = -INF; int Gmax_idx = -1; int Gmin_idx = -1; double obj_diff_min = INF; for(int t=0;t= Gmax) { Gmax = -G[t]; Gmax_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmax) { Gmax = G[t]; Gmax_idx = t; } } int i = Gmax_idx; const Qfloat *Q_i = NULL; if(i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1 Q_i = Q->get_Q(i,active_size); for(int j=0;j= Gmax2) Gmax2 = G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { double grad_diff= Gmax-G[j]; if (-G[j] >= Gmax2) Gmax2 = -G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if 
(obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(Gmax+Gmax2 < eps) return 1; out_i = Gmax_idx; out_j = Gmin_idx; return 0; } bool Solver::be_shrunk(int i, double Gmax1, double Gmax2) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax2); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax1); } else return(false); } void Solver::do_shrinking() { int i; double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) } double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) } // find maximal violating pair first for(i=0;i= Gmax1) Gmax1 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax2) Gmax2 = G[i]; } } else { if(!is_upper_bound(i)) { if(-G[i] >= Gmax2) Gmax2 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax1) Gmax1 = G[i]; } } } if(unshrink == false && Gmax1 + Gmax2 <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i i) { if (!be_shrunk(active_size, Gmax1, Gmax2)) { swap_index(i,active_size); break; } active_size--; } } } double Solver::calculate_rho() { double r; int nr_free = 0; double ub = INF, lb = -INF, sum_free = 0; for(int i=0;i0) r = sum_free/nr_free; else r = (ub+lb)/2; return r; } // // Solver for nu-svm classification and regression // // additional constraint: e^T \alpha = constant // class Solver_NU: public Solver { public: Solver_NU() {} void Solve(int l, const QMatrix& Q, const double *p, const schar *y, double *alpha, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking) { this->si = si; Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking); } private: SolutionInfo *si; int select_working_set(int &i, int &j); double calculate_rho(); bool be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4); void do_shrinking(); }; // return 1 if already optimal, return 0 otherwise int Solver_NU::select_working_set(int &out_i, int &out_j) { // return i,j such that y_i = y_j and // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) double Gmaxp = -INF; double Gmaxp2 = -INF; int Gmaxp_idx = -1; double Gmaxn = -INF; double Gmaxn2 = -INF; int Gmaxn_idx = -1; int Gmin_idx = -1; double obj_diff_min = INF; for(int t=0;t= Gmaxp) { Gmaxp = -G[t]; Gmaxp_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmaxn) { Gmaxn = G[t]; Gmaxn_idx = t; } } int ip = Gmaxp_idx; int in = Gmaxn_idx; const Qfloat *Q_ip = NULL; const Qfloat *Q_in = NULL; if(ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1 Q_ip = Q->get_Q(ip,active_size); if(in != -1) Q_in = Q->get_Q(in,active_size); for(int j=0;j= Gmaxp2) Gmaxp2 = G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[ip]+QD[j]-2*Q_ip[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { double grad_diff=Gmaxn-G[j]; if (-G[j] >= Gmaxn2) Gmaxn2 = -G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[in]+QD[j]-2*Q_in[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps) return 1; if (y[Gmin_idx] == +1) out_i = Gmaxp_idx; else out_i = Gmaxn_idx; out_j = Gmin_idx; return 
0; } bool Solver_NU::be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax4); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax3); } else return(false); } void Solver_NU::do_shrinking() { double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) } double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) } double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) } double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) } // find maximal violating pair first int i; for(i=0;i Gmax1) Gmax1 = -G[i]; } else if(-G[i] > Gmax4) Gmax4 = -G[i]; } if(!is_lower_bound(i)) { if(y[i]==+1) { if(G[i] > Gmax2) Gmax2 = G[i]; } else if(G[i] > Gmax3) Gmax3 = G[i]; } } if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i i) { if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4)) { swap_index(i,active_size); break; } active_size--; } } } double Solver_NU::calculate_rho() { int nr_free1 = 0,nr_free2 = 0; double ub1 = INF, ub2 = INF; double lb1 = -INF, lb2 = -INF; double sum_free1 = 0, sum_free2 = 0; for(int i=0;i 0) r1 = sum_free1/nr_free1; else r1 = (ub1+lb1)/2; if(nr_free2 > 0) r2 = sum_free2/nr_free2; else r2 = (ub2+lb2)/2; si->r = (r1+r2)/2; return (r1-r2)/2; } /////////////////// BSVM code class Solver_SPOC { public: Solver_SPOC() {}; ~Solver_SPOC() {}; void Solve(int l, const Kernel& Q, double *alpha_, short *y_, double *C_, double eps, int shrinking, int nr_class); private: int active_size; double *G; // gradient of objective function short *y; bool *alpha_status; // free:true, bound:false double *alpha; const Kernel *Q; double eps; double *C; int *active_set; int l, nr_class; bool unshrinked; double get_C(int i, int m) { if (y[i] == m) return C[m]; return 0; } void update_alpha_status(int i, int m) { if(alpha[i*nr_class+m] >= get_C(i, m)) alpha_status[i*nr_class+m] = false; else alpha_status[i*nr_class+m] = true; } void swap_index(int i, int j); double select_working_set(int &q); void solve_sub_problem(double A, double *B, double C, double *nu); void reconstruct_gradient(); void do_shrinking(); }; void Solver_SPOC::swap_index(int i, int j) { Q->swap_index(i, j); swap(y[i], y[j]); swap(active_set[i], active_set[j]); for (int m=0;mget_Q(i,l); double alpha_i_m = alpha[i*nr_class+m]; for (int j=active_size;jl = l; this->nr_class = nr_class; this->Q = &Q; clone(y,y_,l); clone(alpha,alpha_,l*nr_class); C = C_; this->eps = eps; unshrinked = false; int i, m, q, old_q = -1; // initialize alpha_status { alpha_status = new bool[l*nr_class]; for(i=0;i 0) solve_sub_problem(A, B, C[y[q]], nu); else { i = 0; for (m=1;m B[i]) i = m; nu[i] = -C[y[q]]; } nu[y[q]] += C[y[q]]; for (m=0;m 1e-12) #endif { alpha[q*nr_class+m] = nu[m]; update_alpha_status(q, m); for (i=0;i 0) nSV++; } //info("\noptimization finished, #iter = %d, obj = %lf\n",iter, obj); // info("nSV = %d, nFREE = %d\n",nSV,nFREE); // put back the solution { for(int i=0;i vio_q) { q = i; vio_q = lb - ub; } } return vio_q; } void Solver_SPOC::do_shrinking() { int i, m; double Gm = select_working_set(i); if (Gm < eps) return; // shrink for (i=0;i= th) goto out; for (m++;m= th) goto out; --active_size; swap_index(i, active_size); --i; out: ; } // unshrink, check all variables again before final iterations if (unshrinked || Gm > 10*eps) return; unshrinked = 
true; reconstruct_gradient(); for (i=l-1;i>=active_size;i--) { double *G_i = &G[i*nr_class]; double th = G_i[y[i]] - Gm/2; for (m=0;m= th) goto out1; for (m++;m= th) goto out1; swap_index(i, active_size); ++active_size; ++i; out1: ; } } int compar(const void *a, const void *b) { if (*(double *)a > *(double *)b) return -1; else if (*(double *)a < *(double *)b) return 1; return 0; } void Solver_SPOC::solve_sub_problem(double A, double *B, double C, double *nu) { int r; double *D; clone(D, B, nr_class+1); qsort(D, nr_class, sizeof(double), compar); D[nr_class] = -INF; double phi = D[0] - A*C; for (r=0;phi<(r+1)*D[r+1];r++) phi += D[r+1]; delete[] D; phi /= (r+1); for (r=0;r 0)? Cp : Cn; } void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } bool is_free(int i) { return alpha_status[i] == FREE; } virtual void swap_index(int i, int j); virtual void reconstruct_gradient(); virtual void shrink_one(int k); virtual void unshrink_one(int k); double select_working_set(int &q); void do_shrinking(); private: double Cp, Cn; double *b; schar *y; }; void Solver_B::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } void Solver_B::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i; for(i=active_size;iget_Q(i,l); double alpha_i = alpha[i]; for(int j=active_size;jl = l; this->Q = &Q; b = b_; clone(y, y_, l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; this->qpsize = qpsize; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i1e-12) { alpha[working_set[i]] = qp.x[i]; Qfloat *QB_i = QB[i]; for(j=0;jobj = v/2; } // juggle everything back /*{ for(int i=0;iupper_bound = new double[2]; si->upper_bound[0] = Cp; si->upper_bound[1] = Cn; // info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;i= positive_max[j]) break; positive_max[j-1] = positive_max[j]; positive_set[j-1] = positive_set[j]; } positive_max[j-1] = v; positive_set[j-1] = i; } } for (i=0;i0) continue; } if (v > positive_max[0]) { for (j=1;j= -Gm) continue; } else continue; --active_size; shrink_one(k); --k; // look at the newcomer } // unshrink, check all variables again before final iterations if (unshrinked || Gm > eps*10) return; unshrinked = true; reconstruct_gradient(); for(k=l-1;k>=active_size;k--) { if (is_lower_bound(k)) { if (G[k] > Gm) continue; } else if (is_upper_bound(k)) { if (G[k] < -Gm) continue; } else continue; unshrink_one(k); active_size++; ++k; // look at the newcomer } } class Solver_B_linear : public Solver_B { public: Solver_B_linear() {}; ~Solver_B_linear() {}; int Solve(int l, svm_node * const * x_, double *b_, schar *y_, double *alpha_, double *w, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking, int qpsize); private: double get_C(int i) { return (y[i] > 0)? 
Cp : Cn; } void swap_index(int i, int j); void reconstruct_gradient(); double dot(int i, int j); double Cp, Cn; double *b; schar *y; double *w; const svm_node **x; }; double Solver_B_linear::dot(int i, int j) { const svm_node *px = x[i], *py = x[j]; double sum = 0; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } void Solver_B_linear::swap_index(int i, int j) { swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(x[i], x[j]); } void Solver_B_linear::reconstruct_gradient() { int i; for(i=active_size;iindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[i] = y[i]*sum + b[i]; } } int Solver_B_linear::Solve(int l, svm_node * const * x_, double *b_, schar *y_, double *alpha_, double *w, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking, int qpsize) { this->l = l; clone(x, x_, l); clone(b, b_, l); clone(y, y_, l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; this->qpsize = qpsize; this->w = w; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;iindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[i] += y[i]*sum; } } // optimization step int iter = 0; int counter = min(l*2/qpsize,2000/qpsize)+1; while(1) { // show progress and do shrinking if(--counter == 0) { counter = min(l*2/qpsize, 2000/qpsize); if(shrinking) do_shrinking(); // info("."); } int i,j,q; if (select_working_set(q) < eps) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; // info("*");info_flush(); if (select_working_set(q) < eps) break; else counter = 1; // do shrinking next iteration } if (counter == min(l*2/qpsize, 2000/qpsize)) { bool same = true; for (i=0;i1e-12) { alpha[Bi] = qp.x[i]; update_alpha_status(Bi); double yalpha = y[Bi]*d; for (const svm_node *px = x[Bi];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } for(j=0;jindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[j] = y[j]*sum + b[j]; } } // calculate objective value { double v = 0; int i; for(i=0;iobj = v/2; } // juggle everything back /*{ for(int i=0;iupper_bound = new double[2]; si->upper_bound[0] = Cp; si->upper_bound[1] = Cn; // info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;iget_Q(real_i[i],real_l); double alpha_i = alpha[i], t; int y_i = y[i], yy_i = yy[i], ub, k; t = 2*alpha_i; ub = start2[yy_i*nr_class+y_i+1]; for (j=start2[yy_i*nr_class+y_i];jl = l; this->nr_class = nr_class; this->real_l = l/(nr_class - 1); this->Q = &Q; this->lin = lin; clone(y,y_,l); clone(alpha,alpha_,l); C = C_; this->eps = eps; this->qpsize = qpsize; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i 1e-12) { alpha[Bi] = qp.x[i]; Qfloat *QB_i = QB[i]; int y_Bi = y[Bi], yy_Bi = yy[Bi], ub, k; double t = 2*d; ub = start1[yy_Bi*nr_class+y_Bi+1]; for (j=start1[yy_Bi*nr_class+y_Bi];jobj = v/4; } clone(si->upper_bound,C,nr_class); //info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;i0;i--) swap_index(start2[i], start2[i-1]); t = s + 1; for (i=nr_class*nr_class;i>t;i--) swap_index(start1[i], start1[i-1]); t = nr_class*nr_class; for (i=s+1;i<=t;i++) start1[i]++; for (i=0;i<=s;i++) start2[i]++; } // // Q matrices for various formulations 
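//
// Each Q class below wraps Kernel and serves rows of the QP matrix on
// demand via get_Q(i,len), backed by the Cache class defined earlier in
// this file. A minimal sketch of the access pattern the solvers rely on
// (illustrative only, reconstructed from the methods below):
//
//   Qfloat *row;
//   int start = cache->get_data(i, &row, len); // entries [0,start) already cached
//   for (int j = start; j < len; j++)          // compute only the missing tail
//     row[j] = (Qfloat)((this->*kernel_function)(i, j));
//
// Note that the BSVM variants (BSVC_Q, BONE_CLASS_Q, BSVR_Q) add +1 to
// every kernel entry, folding the bias term into the kernel matrix.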
// class BSVC_Q: public Kernel { public: BSVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_) :Kernel(prob.l, prob.x, param) { clone(y,y_,prob.l); cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j) + 1); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(y[i],y[j]); } ~BSVC_Q() { delete[] y; delete cache; delete[] QD; } private: schar *y; Cache *cache; double *QD; }; class BONE_CLASS_Q: public Kernel { public: BONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j) + 1; } return data; } double *get_QD() const { return QD; } ~BONE_CLASS_Q() { delete cache; delete[] QD; } private: Cache *cache; double *QD; }; class BSVR_Q: public Kernel { public: BSVR_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { l = prob.l; cache = new Cache(l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;kget_data(real_i,&data,l) < l) { for(int j=0;j*kernel_function)(real_i,j) + 1; } // reorder and copy Qfloat *buf = buffer[next_buffer]; next_buffer = (next_buffer+1)%q; schar si = sign[i]; for(int j=0;j*kernel_function)(i,i); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j)); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(y[i],y[j]); swap(QD[i],QD[j]); } ~SVC_Q() { delete[] y; delete cache; delete[] QD; } private: schar *y; Cache *cache; double *QD; }; class ONE_CLASS_Q: public Kernel { public: ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[prob.l]; for(int i=0;i*kernel_function)(i,i); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(QD[i],QD[j]); } ~ONE_CLASS_Q() { delete cache; delete[] QD; } private: Cache *cache; double *QD; }; class SVR_Q: public Kernel { public: SVR_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { l = prob.l; cache = new Cache(l,(long int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[2*l]; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;k*kernel_function)(k,k); QD[k+l]=QD[k]; } buffer[0] = new Qfloat[2*l]; buffer[1] = new Qfloat[2*l]; next_buffer = 0; } void swap_index(int i, int j) const { swap(sign[i],sign[j]); swap(index[i],index[j]); swap(QD[i],QD[j]); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int real_i = index[i]; if(cache->get_data(real_i,&data,l) < l) { for(int 
j=0;j*kernel_function)(real_i,j); } // reorder and copy Qfloat *buf = buffer[next_buffer]; next_buffer = 1 - next_buffer; schar si = sign[i]; for(int j=0;jsvm_type; if(svm_type != C_BSVC && svm_type != EPSILON_BSVR && svm_type != KBB && svm_type != SPOC) return "unknown svm type"; // kernel_type int kernel_type = param->kernel_type; if(kernel_type != LINEAR && kernel_type != POLY && kernel_type != RBF && kernel_type != SIGMOID && kernel_type != R && kernel_type != LAPLACE&& kernel_type != BESSEL&& kernel_type != ANOVA) return "unknown kernel type"; // cache_size,eps,C,nu,p,shrinking if(kernel_type != LINEAR) if(param->cache_size <= 0) return "cache_size <= 0"; if(param->eps <= 0) return "eps <= 0"; if(param->C <= 0) return "C <= 0"; if(svm_type == EPSILON_BSVR) if(param->p < 0) return "p < 0"; if(param->shrinking != 0 && param->shrinking != 1) return "shrinking != 0 and shrinking != 1"; if(svm_type == C_BSVC || svm_type == KBB || svm_type == SPOC) if(param->qpsize < 2) return "qpsize < 2"; if(kernel_type == LINEAR) if (param->Cbegin <= 0) return "Cbegin <= 0"; if(kernel_type == LINEAR) if (param->Cstep <= 1) return "Cstep <= 1"; return NULL; } const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *param) { // svm_type int svm_type = param->svm_type; if(svm_type != C_SVC && svm_type != NU_SVC && svm_type != ONE_CLASS && svm_type != EPSILON_SVR && svm_type != NU_SVR) return "unknown svm type"; // kernel_type int kernel_type = param->kernel_type; if(kernel_type != LINEAR && kernel_type != POLY && kernel_type != RBF && kernel_type != SIGMOID && kernel_type != R && kernel_type != LAPLACE&& kernel_type != BESSEL&& kernel_type != ANOVA&& kernel_type != SPLINE) return "unknown kernel type"; // cache_size,eps,C,nu,p,shrinking if(param->cache_size <= 0) return "cache_size <= 0"; if(param->eps <= 0) return "eps <= 0"; if(svm_type == C_SVC || svm_type == EPSILON_SVR || svm_type == NU_SVR) if(param->C <= 0) return "C <= 0"; if(svm_type == NU_SVC || svm_type == ONE_CLASS || svm_type == NU_SVR) if(param->nu < 0 || param->nu > 1) return "nu < 0 or nu > 1"; if(svm_type == EPSILON_SVR) if(param->p < 0) return "p < 0"; if(param->shrinking != 0 && param->shrinking != 1) return "shrinking != 0 and shrinking != 1"; // check whether nu-svc is feasible if(svm_type == NU_SVC) { int l = prob->l; int max_nr_class = 16; int nr_class = 0; int *label = Malloc(int,max_nr_class); int *count = Malloc(int,max_nr_class); int i; for(i=0;iy[i]; int j; for(j=0;jnu*(n1+n2)/2 > min(n1,n2)) { free(label); free(count); return "specified nu is infeasible"; } } } } return NULL; } #include #include #include extern "C" { struct svm_node ** sparsify (double *x, int r, int c) { struct svm_node** sparse; int i, ii, count; sparse = (struct svm_node **) malloc (r * sizeof(struct svm_node *)); for (i = 0; i < r; i++) { /* determine nr. 
of non-zero elements */ for (count = ii = 0; ii < c; ii++) if (x[i * c + ii] != 0) count++; /* allocate memory for column elements */ sparse[i] = (struct svm_node *) malloc ((count + 1) * sizeof(struct svm_node)); /* set column elements */ for (count = ii = 0; ii < c; ii++) if (x[i * c + ii] != 0) { sparse[i][count].index = ii; sparse[i][count].value = x[i * c + ii]; count++; } /* set termination element */ sparse[i][count].index = -1; } return sparse; } struct svm_node ** transsparse (double *x, int r, int *rowindex, int *colindex) { struct svm_node** sparse; int i, ii, count = 0, nnz = 0; sparse = (struct svm_node **) malloc (r * sizeof(struct svm_node*)); for (i = 0; i < r; i++) { /* allocate memory for column elements */ nnz = rowindex[i+1] - rowindex[i]; sparse[i] = (struct svm_node *) malloc ((nnz + 1) * sizeof(struct svm_node)); /* set column elements */ for (ii = 0; ii < nnz; ii++) { sparse[i][ii].index = colindex[count]; sparse[i][ii].value = x[count]; count++; } /* set termination element */ sparse[i][ii].index = -1; } return sparse; } void tron_run(const svm_problem *prob, const svm_parameter* param, double *alpha, double *weighted_C, Solver_B::SolutionInfo* sii, int nr_class, int *count) { int l = prob->l; int i; double Cp = param->C; double Cn = param->C; if(param->nr_weight > 0) { Cp = param->C*param->weight[0]; Cn = param->C*param->weight[1]; } switch(param->svm_type) { case C_BSVC: { // double *alpha = new double[l]; double *minus_ones = new double[l]; schar *y = new schar[l]; for(i=0;iy[i] > 0) y[i] = +1; else y[i]=-1; } if (param->kernel_type == LINEAR) { double *w = new double[prob->n+1]; for (i=0;i<=prob->n;i++) w[i] = 0; Solver_B_linear s; int totaliter = 0; double Cpj = param->Cbegin, Cnj = param->Cbegin*Cn/Cp; while (Cpj < Cp) { totaliter += s.Solve(l, prob->x, minus_ones, y, alpha, w, Cpj, Cnj, param->eps, sii, param->shrinking, param->qpsize); if (Cpj*param->Cstep >= Cp) { for (i=0;i<=prob->n;i++) w[i] = 0; for (i=0;i= Cpj) alpha[i] = Cp; else if (y[i] == -1 && alpha[i] >= Cnj) alpha[i] = Cn; else alpha[i] *= Cp/Cpj; double yalpha = y[i]*alpha[i]; for (const svm_node *px = prob->x[i];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } else { for (i=0;iCstep; for (i=0;i<=prob->n;i++) w[i] *= param->Cstep; } Cpj *= param->Cstep; Cnj *= param->Cstep; } totaliter += s.Solve(l, prob->x, minus_ones, y, alpha, w, Cp, Cn, param->eps, sii, param->shrinking, param->qpsize); //info("\noptimization finished, #iter = %d\n",totaliter); delete[] w; } else { Solver_B s; s.Solve(l, BSVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, sii, param->shrinking, param->qpsize); } // double sum_alpha=0; // for(i=0;iC*prob->l)); // for(i=0;ip - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } if (param->kernel_type == LINEAR) { double *w = new double[prob->n+1]; for (i=0;i<=prob->n;i++) w[i] = 0; struct svm_node **x = new svm_node*[2*l]; for (i=0;ix[i]; Solver_B_linear s; int totaliter = 0; double Cj = param->Cbegin; while (Cj < param->C) { totaliter += s.Solve(2*l, x, linear_term, y, alpha, w, Cj, Cj, param->eps, sii, param->shrinking, param->qpsize); if (Cj*param->Cstep >= param->C) { for (i=0;i<=prob->n;i++) w[i] = 0; for (i=0;i<2*l;i++) { if (alpha[i] >= Cj) alpha[i] = param->C; else alpha[i] *= param->C/Cj; double yalpha = y[i]*alpha[i]; for (const svm_node *px = x[i];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } else { for (i=0;i<2*l;i++) alpha[i] *= param->Cstep; for 
(i=0;i<=prob->n;i++) w[i] *= param->Cstep; } Cj *= param->Cstep; } totaliter += s.Solve(2*l, x, linear_term, y, alpha2, w, param->C, param->C, param->eps, sii, param->shrinking, param->qpsize); //info("\noptimization finished, #iter = %d\n",totaliter); } else { Solver_B s; s.Solve(2*l, BSVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, sii, param->shrinking, param->qpsize); } double sum_alpha = 0; for(i=0;iC*l)); delete[] y; delete[] alpha2; delete[] linear_term; } break; case KBB: { Solver_B::SolutionInfo si; int i=0 , j=0 ,k=0 , ll = l*(nr_class - 1); double *alpha2 = Malloc(double, ll); short *y = new short[ll]; for (i=0;iy[q]; else q += count[j]; } Solver_MB s; s.Solve(ll, BONE_CLASS_Q(*prob,*param), -2, alpha2, y, weighted_C, 2*param->eps, &si, param->shrinking, param->qpsize, nr_class, count); //info("obj = %f, rho = %f\n",si.obj,0.0); int *start = Malloc(int,nr_class); start[0] = 0; for(i=1;iy[i]; } Solver_SPOC s; s.Solve(l, ONE_CLASS_Q(*prob, *param), alpha, y, weighted_C, param->eps, param->shrinking, nr_class); free(weighted_C); delete[] y; } break; } } SEXP tron_optim(SEXP x, SEXP r, SEXP c, SEXP y, SEXP K, SEXP colindex, SEXP rowindex, SEXP sparse, SEXP nclass, SEXP countc, SEXP kernel_type, SEXP svm_type, SEXP cost, SEXP eps, SEXP gamma, SEXP degree, SEXP coef0, SEXP Cbegin, SEXP Cstep, SEXP weightlabels, SEXP weights, SEXP nweights, SEXP weightedc, SEXP cache, SEXP epsilon, SEXP qpsize, SEXP shrinking ) { struct svm_parameter param; struct svm_problem prob; int i ,*count = NULL; double *alpha2 = NULL; SEXP alpha3 = NULL; int nr_class; const char* s; struct Solver_B::SolutionInfo si; param.svm_type = *INTEGER(svm_type); param.kernel_type = *INTEGER(kernel_type); param.degree = *INTEGER(degree); param.gamma = *REAL(gamma); param.coef0 = *REAL(coef0); param.cache_size = *REAL(cache); param.eps = *REAL(epsilon); param.C = *REAL(cost); param.Cbegin = *REAL(Cbegin); param.Cstep = *REAL(Cstep); param.K = REAL(K); param.qpsize = *INTEGER(qpsize); nr_class = *INTEGER(nclass); param.nr_weight = *INTEGER(nweights); if (param.nr_weight > 0) { param.weight = (double *) malloc (sizeof(double) * param.nr_weight); memcpy (param.weight, REAL(weights), param.nr_weight * sizeof(double)); param.weight_label = (int *) malloc (sizeof(int) * param.nr_weight); memcpy (param.weight_label, INTEGER(weightlabels), param.nr_weight * sizeof(int)); } param.p = *REAL(eps); param.shrinking = *INTEGER(shrinking); param.lim = 1/(gammafn(param.degree+1)*powi(2,param.degree)); /* set problem */ prob.l = *INTEGER(r); prob.n = *INTEGER(c); prob.y = (double *) malloc (sizeof(double) * prob.l); memcpy(prob.y, REAL(y), prob.l*sizeof(double)); if (*INTEGER(sparse) > 0) prob.x = transsparse(REAL(x), *INTEGER(r), INTEGER(rowindex), INTEGER(colindex)); else prob.x = sparsify(REAL(x), *INTEGER(r), *INTEGER(c)); s = svm_check_parameterb(&prob, ¶m); //if (s) //printf("%s",s); //else { double *weighted_C = Malloc(double, nr_class); memcpy(weighted_C, REAL(weightedc), nr_class*sizeof(double)); if(param.svm_type == 7) { alpha2 = (double *) malloc (sizeof(double) * prob.l*nr_class); } if(param.svm_type == 8) { count = Malloc(int, nr_class); memcpy(count, INTEGER(countc), nr_class*sizeof(int)); alpha2 = (double *) malloc (sizeof(double) * prob.l*(nr_class-1)); } if(param.svm_type == 5||param.svm_type==6) { alpha2 = (double *) malloc (sizeof(double) * prob.l); } tron_run(&prob, ¶m, alpha2, weighted_C , &si, nr_class, count); //} /* clean up memory */ if (param.nr_weight > 0) { free(param.weight); 
free(param.weight_label); } if(param.svm_type == 7) { PROTECT(alpha3 = allocVector(REALSXP, (nr_class*prob.l + 1))); UNPROTECT(1); for (i = 0; i < prob.l; i++) free (prob.x[i]); for (i = 0; i l; int i; switch(param->svm_type) { case C_SVC: { double Cp,Cn; double *minus_ones = new double[l]; schar *y = new schar[l]; for(i=0;iy[i] > 0) y[i] = +1; else y[i]=-1; } if(param->nr_weight > 0) { Cp = C*param->weight[0]; Cn = C*param->weight[1]; } else Cp = Cn = C; Solver s; //have to weight cost parameter for multiclass. problems s.Solve(l, SVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, si, param->shrinking); delete[] minus_ones; delete[] y; } break; case NU_SVC: { schar *y = new schar[l]; double nu = param->nu; double sum_pos = nu*l/2; double sum_neg = nu*l/2; for(i=0;iy[i]>0) { y[i] = +1; alpha[i] = min(1.0,sum_pos); sum_pos -= alpha[i]; } else { y[i] = -1; alpha[i] = min(1.0,sum_neg); sum_neg -= alpha[i]; } double *zeros = new double[l]; for(i=0;ieps, si, param->shrinking); double r = si->r; //info("C = %f\n",1/r); for(i=0;irho /= r; si->obj /= (r*r); si->upper_bound_p = 1/r; si->upper_bound_n = 1/r; delete[] y; delete[] zeros; } break; case ONE_CLASS: { double *zeros = new double[l]; schar *ones = new schar[l]; int n = (int)(param->nu*l); // # of alpha's at upper bound // set initial alpha probably usefull for smo for(i=0;inu * l - n; for(i=n+1;ieps, si, param->shrinking); delete[] zeros; delete[] ones; } break; case EPSILON_SVR: { double *alpha2 = new double[2*l]; double *linear_term = new double[2*l]; schar *y = new schar[2*l]; for(i=0;ip - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } Solver s; s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, si, param->shrinking); double sum_alpha = 0; for(i=0;iC*l)); delete[] alpha2; delete[] linear_term; delete[] y; } break; case NU_SVR: { double C = param->C; double *alpha2 = new double[2*l]; double *linear_term = new double[2*l]; schar *y = new schar[2*l]; double sum = C * param->nu * l / 2; for(i=0;iy[i]; y[i] = 1; linear_term[i+l] = prob->y[i]; y[i+l] = -1; } Solver_NU s; s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y, alpha2, C, C, param->eps, si, param->shrinking); //info("epsilon = %f\n",-si->r); for(i=0;i 0) { param.weight = (double *) malloc (sizeof(double) * param.nr_weight); memcpy (param.weight, REAL(weights), param.nr_weight * sizeof(double)); param.weight_label = (int *) malloc (sizeof(int) * param.nr_weight); memcpy (param.weight_label, INTEGER(weightlabels), param.nr_weight * sizeof(int)); } param.p = *REAL(eps); param.shrinking = *INTEGER(shrinking); param.lim = 1/(gammafn(param.degree+1)*powi(2,param.degree)); /* set problem */ prob.l = *INTEGER(r); prob.y = REAL(y); prob.n = *INTEGER(c); if (*INTEGER(sparse) > 0) prob.x = transsparse(REAL(x), *INTEGER(r), INTEGER(rowindex), INTEGER(colindex)); else prob.x = sparsify(REAL(x), *INTEGER(r), *INTEGER(c)); double *alpha2 = (double *) malloc (sizeof(double) * prob.l); s = svm_check_parameter(&prob, ¶m); //if (s) { //printf("%s",s); //} //else { solve_smo(&prob, ¶m, alpha2, &si, *REAL(cost), REAL(linear_term)); //} PROTECT(alpha = allocVector(REALSXP, prob.l+2)); /* clean up memory */ if (param.nr_weight > 0) { free(param.weight); free(param.weight_label); } for (i = 0; i < prob.l; i++) {free (prob.x[i]); REAL(alpha)[i] = *(alpha2+i); } free (prob.x); REAL(alpha)[prob.l] = si.rho; REAL(alpha)[prob.l+1] = si.obj; free(alpha2); UNPROTECT(1); return alpha; } } 
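/*
 * Note on the result vector assembled just above: REAL(alpha)[0..prob.l-1]
 * hold the coefficients alpha, REAL(alpha)[prob.l] holds si.rho, and
 * REAL(alpha)[prob.l+1] holds the objective value si.obj. A minimal
 * sketch of unpacking it on the C side (illustrative only; `res' and `l'
 * are placeholder names, not part of the package API):
 *
 *   double *res  = REAL(alpha);
 *   double *coef = res;        // res[0..l-1]: alpha
 *   double rho   = res[l];     // res[l]:      rho (the offset)
 *   double obj   = res[l+1];   // res[l+1]:    objective value
 */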
kernlab/src/solvebqp.c0000644000176000001440000000316012651720731014512 0ustar ripleyusers#include #include #include /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *); */ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern void dtron(int, double *, double *, double *, double, double, double, double, int, double); struct BQP { double eps; int n; double *x, *C, *Q, *p; }; int nfev, inc = 1; double one = 1, zero = 0, *A, *g0; int uhes(int n, double *x, double **H) { *H = A; return 0; } int ugrad(int n, double *x, double *g) { /* evaluate the gradient g = A*x + g0 */ memcpy(g, g0, sizeof(double)*n); F77_CALL(dsymv)("U", &n, &one, A, &n, x, &inc, &one, g, &inc); return 0; } int ufv(int n, double *x, double *f) { /* evaluate the function value f(x) = 0.5*x'*A*x + g0'*x */ double *t = (double *) malloc(sizeof(double)*n); F77_CALL(dsymv)("U", &n, &one, A, &n, x, &inc, &zero, t, &inc); *f = F77_CALL(ddot)(&n, x, &inc, g0, &inc) + 0.5 * F77_CALL(ddot)(&n, x, &inc, t, &inc); free(t); return ++nfev; } void solvebqp(struct BQP *qp) { /* driver for positive semidefinite quadratic programing version of tron */ int i, n, maxfev; double *x, *xl, *xu; double frtol, fatol, fmin, gtol, cgtol; n = qp->n; maxfev = 1000; /* ? */ nfev = 0; x = qp->x; xu = qp->C; A = qp->Q; g0 = qp->p; xl = (double *) malloc(sizeof(double)*n); for (i=0;ieps; dtron(n, x, xl, xu, gtol, frtol, fatol, fmin, maxfev, cgtol); free(xl); } kernlab/src/stack.h0000644000176000001440000000630512651720731013775 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef MSUFSORT_STACK_H #define MSUFSORT_STACK_H //============================================================================================= // A quick and dirty stack class for use with the MSufSort algorithm // // Author: M.A. Maniscalco // Date: 7/30/04 // email: michael@www.michael-maniscalco.com // // This code is free for non commercial use only. 
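//
// Usage sketch (mirrors how MSufSort instantiates these stacks later in
// this package; the element type and sizes are illustrative, not
// prescriptive):
//
//   Stack<unsigned int> chains(8192, 0x10000, true); // pre-allocated
//   chains.Push(42);
//   unsigned int top = chains.Pop();
//
// Growth policy, as implemented in Push()/SetSize() below: while the
// stack is smaller than maxExpandSize it grows by maxExpandSize
// elements; after that its capacity doubles on each expansion.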
// //============================================================================================= #include "memory.h" template class Stack { public: Stack(unsigned int initialSize, unsigned int maxExpandSize, bool preAllocate = false): m_initialSize(initialSize), m_maxExpandSize(maxExpandSize), m_preAllocate(preAllocate) { Initialize(); } virtual ~Stack(){SetSize(0);} void Push(T value); T & Pop(); T & Top(); void SetSize(unsigned int stackSize); void Initialize(); unsigned int Count(); void Clear(); T * m_stack; T * m_stackPtr; T * m_endOfStack; unsigned int m_stackSize; unsigned int m_initialSize; unsigned int m_maxExpandSize; bool m_preAllocate; }; template inline void Stack::Clear() { m_stackPtr = m_stack; } template inline unsigned int Stack::Count() { return (unsigned int)(m_stackPtr - m_stack); } template inline void Stack::Initialize() { m_stack = m_endOfStack = m_stackPtr = 0; m_stackSize = 0; if (m_preAllocate) SetSize(m_initialSize); } template inline void Stack::Push(T value) { if (m_stackPtr >= m_endOfStack) { unsigned int newSize = (m_stackSize < m_maxExpandSize) ? m_stackSize + m_maxExpandSize : (m_stackSize << 1); SetSize(newSize); } *(m_stackPtr++) = value; } template inline T & Stack::Pop() { return *(--m_stackPtr); } template inline T & Stack::Top() { return *(m_stackPtr - 1); } template inline void Stack::SetSize(unsigned int stackSize) { if (m_stackSize == stackSize) return; T * newStack = 0; if (stackSize) { newStack = new T[stackSize]; unsigned int bytesToCopy = (unsigned int)(m_stackPtr - m_stack) * (unsigned int)sizeof(T); if (bytesToCopy) memcpy(newStack, m_stack, bytesToCopy); m_stackPtr = &newStack[m_stackPtr - m_stack]; m_endOfStack = &newStack[stackSize]; m_stackSize = stackSize; } if (m_stack) delete [] m_stack; m_stack = newStack; } #endif kernlab/src/Makevars.win0000644000176000001440000000006012651720731014777 0ustar ripleyusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) kernlab/src/dgpstep.c0000644000176000001440000000275112651720731014332 0ustar ripleyusersvoid dgpstep(int n, double *x, double *xl, double *xu, double alpha, double *w, double *s) { /* c ********** c c Subroutine dgpstep c c This subroutine computes the gradient projection step c c s = P[x + alpha*w] - x, c c where P is the projection on the n-dimensional interval [xl,xu]. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c alpha is a double precision variable. c On entry alpha specifies the scalar alpha. c On exit alpha is unchanged. c c w is a double precision array of dimension n. c On entry w specifies the vector w. c On exit w is unchanged. c c s is a double precision array of dimension n. c On entry s need not be specified. c On exit s contains the gradient projection step. c c ********** */ int i; for (i=0;i xu[i]) s[i] = xu[i] - x[i]; else s[i] = alpha*w[i]; } kernlab/src/brweight.cpp0000644000176000001440000000435112651720731015035 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/BoundedRangeWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef BRWEIGHT_CPP #define BRWEIGHT_CPP #include "brweight.h" #include #define MIN(x,y) (((x) < (y)) ? (x) : (y)) #define MAX(x,y) (((x) > (y)) ? (x) : (y)) /** * Bounded Range weight function. * W(y,t) := max(0,min(tau,n)-gamma) * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode BoundedRangeWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. Real tau = (Real)x_len; Real gamma = (Real)floor_len; weight = MAX(0,MIN(tau,n)-gamma); // std::cout << "floor_len:"< * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ExpDecayWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef EXPDECAYWEIGHT_CPP #define EXPDECAYWEIGHT_CPP #include #include #include "expdecayweight.h" using namespace std; /** * Exponential Decay weight function. * W(y,t) := (lambda^{-gamma} - lambda^{-tau}) / (lambda - 1) * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode ExpDecayWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) // ErrorCode // ExpDecayWeight::ComputeWeight(const Real &floor_len, const Real &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. 
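// Worked example of the formula above (illustrative): with lambda = 2,
// floor_len (gamma) = 1 and x_len (tau) = 3,
//   weight = (2^-1 - 2^-3) / (2 - 1) = (0.5 - 0.125) / 1 = 0.375,
// which is exactly the geometric sum 2^-2 + 2^-3, i.e. the decayed
// contributions of the match lengths between gamma+1 and tau.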
if(floor_len == x_len) { //' substring ended on an interval, so, get the val from val[] weight = 0.0; } else { //weight = (pow(-(floor_len-1), lambda) - pow(-x_len, lambda)) / (1-lambda); //weight = (pow(lambda,((Real)floor_len)) - pow(lambda, (Real)x_len+1)) / (1-lambda); // double a=floor_len*-1.0; // double b=x_len*-1.0; // weight = (pow(lambda,a) - pow(lambda, b)) / (lambda-1); weight = (pow(lambda,Real(-1.0*floor_len)) - pow(lambda, Real(-1.0*x_len))) / (lambda-1); } // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " pow1 : " << pow(lambda,-((Real)floor_len)) // << " pow2 : " << pow(lambda,-(Real)x_len) // << " weight : " << weight << std::endl; return NOERROR; } #endif kernlab/src/ilcpfactory.h0000644000176000001440000000304512651720731015205 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_LCPFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef ILCPFACTORY_H #define ILCPFACTORY_H #include "datatype.h" #include "errorcode.h" #include "lcp.h" class I_LCPFactory { public: /// Constructor I_LCPFactory(){} /// Destructor virtual ~I_LCPFactory(){} /// Methods virtual ErrorCode ComputeLCP(const SYMBOL *text, const UInt32 &length, const UInt32 *sa, LCP& lcp) = 0; }; #endif kernlab/src/msufsort.cpp0000644000176000001440000002404712651720731015110 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #include "msufsort.h" #include #include #include #include //============================================================================= // MSufSort. 
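// Driver overview (see Sort() below): InitialSort() builds the initial
// suffix chains, ProcessNextChain() drains the chain-head stack, and any
// suffixes deferred to enhanced induction sorting are finished by
// ProcessSuffixesSortedByEnhancedInduction().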
//============================================================================= SYMBOL_TYPE MSufSort::m_reverseAltSortOrder[256]; // chteo: Changed the member initialisation order to get rid of compilation warning [181006] // MSufSort::MSufSort():m_ISA(0), m_chainHeadStack(8192, 0x20000, true), m_suffixesSortedByInduction(120000, 1000000, true), // m_chainMatchLengthStack(8192, 0x10000, true), m_chainCountStack(8192, 0x10000, true) MSufSort::MSufSort():m_chainMatchLengthStack(8192, 0x10000, true), m_chainCountStack(8192, 0x10000, true), m_chainHeadStack(8192, 0x20000, true), m_ISA(0), m_suffixesSortedByInduction(120000, 1000000, true) { // constructor. unsigned char array[10] = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}; int n = 0; for (; n < 10; n++) { m_forwardAltSortOrder[array[n]] = n; m_reverseAltSortOrder[n] = array[n]; } for (int i = 0; i < 256; i++) { bool unresolved = true; for (int j = 0; j < 10; j++) if (array[j] == i) unresolved = false; if (unresolved) { m_forwardAltSortOrder[i] = n; m_reverseAltSortOrder[n++] = i; } } } MSufSort::~MSufSort() { // destructor. // delete the inverse suffix array if allocated. if (m_ISA) delete [] m_ISA; m_ISA = 0; } void MSufSort::ReverseAltSortOrder(SYMBOL_TYPE * data, unsigned int nBytes) { #ifndef SORT_16_BIT_SYMBOLS for (unsigned int i = 0; i < nBytes; i++) data[i] = m_reverseAltSortOrder[data[i]]; #endif } unsigned int MSufSort::GetElapsedSortTime() { return m_sortTime; } unsigned int MSufSort::GetMemoryUsage() { /* unsigned int ret = 5 * m_sourceLength; ret += (m_chainStack.m_stackSize * 4); ret += (m_suffixesSortedByInduction.m_stackSize * 8); ret += sizeof(*this); */ return 0; } unsigned int MSufSort::Sort(SYMBOL_TYPE * source, unsigned int sourceLength) { ///tch: //printf("\nIn MSufSort::Sort()\n"); // set the member variables to the source string and its length. m_source = source; m_sourceLength = sourceLength; m_sourceLengthMinusOne = sourceLength - 1; Initialize(); unsigned int start = clock(); InitialSort(); while (m_chainHeadStack.Count()) ProcessNextChain(); while (m_currentSuffixChainId <= 0xffff) ProcessSuffixesSortedByEnhancedInduction(m_currentSuffixChainId++); unsigned int finish = clock(); m_sortTime = finish - start; ///tch: //printf("\nFinished MSufSort::Sort()\nPress any key to continue...\n"); //printf("%s\n",m_source); //system("pause"); //getchar(); // printf(" %c", 13); return ISA(0); } void MSufSort::Initialize() { // Initializes this object just before sorting begins. if (m_ISA) delete [] m_ISA; m_ISA = new unsigned int[m_sourceLength + 1]; memset(m_ISA, 0, m_sourceLength + 1); m_nextSortedSuffixValue = 0; m_numSortedSuffixes = 0; m_suffixMatchLength = 0; m_currentSuffixChainId = 0; m_tandemRepeatDepth = 0; m_firstSortedTandemRepeat = END_OF_CHAIN; m_hasTandemRepeatSortedByInduction = false; m_hasEvenLengthTandemRepeats = false; m_firstUnsortedTandemRepeat = END_OF_CHAIN; for (unsigned int i = 0; i < 0x10000; i++) m_startOfSuffixChain[i] = m_endOfSuffixChain[i] = m_firstSuffixByEnhancedInductionSort[i] = END_OF_CHAIN; for (unsigned int i = 0; i < 0x10000; i++) m_firstSortedPosition[i] = 0; m_numNewChains = 0; #ifdef SHOW_PROGRESS m_progressUpdateIncrement = (unsigned int)(m_sourceLength / 100); m_nextProgressUpdate = 1; #endif } void MSufSort::InitialSort() { // This is the first sorting pass which makes the initial suffix // chains from the given source string. Pushes these chains onto // the stack for further sorting. 
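	// A "chain" links all suffixes that share the same leading 16-bit
	// symbol pair: Value16(suffixIndex) selects the bucket, and m_ISA
	// temporarily stores the next suffix in the chain (END_OF_CHAIN
	// terminates each list) until the suffix is finally sorted.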
#ifndef SORT_16_BIT_SYMBOLS #ifdef USE_ALT_SORT_ORDER for (unsigned int suffixIndex = 0; suffixIndex < m_sourceLength; suffixIndex++) m_source[suffixIndex] = m_forwardAltSortOrder[m_source[suffixIndex]]; #endif #endif #ifdef USE_ENHANCED_INDUCTION_SORTING m_ISA[m_sourceLength - 1] = m_ISA[m_sourceLength - 2] = SORTED_BY_ENHANCED_INDUCTION; m_firstSortedPosition[Value16(m_sourceLength - 1)]++; m_firstSortedPosition[Value16(m_sourceLength - 2)]++; for (int suffixIndex = m_sourceLength - 3; suffixIndex >= 0; suffixIndex--) { unsigned short symbol = Value16(suffixIndex); m_firstSortedPosition[symbol]++; #ifdef SORT_16_BIT_SYMBOLS unsigned short valA = ENDIAN_SWAP_16(m_source[suffixIndex]); unsigned short valB = ENDIAN_SWAP_16(m_source[suffixIndex + 1]); if ((suffixIndex == m_sourceLengthMinusOne) || (valA > valB)) m_ISA[suffixIndex] = SORTED_BY_ENHANCED_INDUCTION; else AddToSuffixChain(suffixIndex, symbol); #else bool useEIS = false; if ((m_source[suffixIndex] > m_source[suffixIndex + 1]) || ((m_source[suffixIndex] < m_source[suffixIndex + 1]) && (m_source[suffixIndex] > m_source[suffixIndex + 2]))) useEIS = true; if (!useEIS) { if (m_endOfSuffixChain[symbol] == END_OF_CHAIN) { m_endOfSuffixChain[symbol] = m_startOfSuffixChain[symbol] = suffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(symbol); } else { m_ISA[suffixIndex] = m_startOfSuffixChain[symbol]; m_startOfSuffixChain[symbol] = suffixIndex; } } else m_ISA[suffixIndex] = SORTED_BY_ENHANCED_INDUCTION; #endif } #else for (unsigned int suffixIndex = 0; suffixIndex < m_sourceLength; suffixIndex++) { unsigned short symbol = Value16(suffixIndex); AddToSuffixChain(suffixIndex, symbol); } #endif #ifdef USE_ENHANCED_INDUCTION_SORTING unsigned int n = 1; for (unsigned int i = 0; i < 0x10000; i++) { unsigned short p = ENDIAN_SWAP_16(i); unsigned int temp = m_firstSortedPosition[p]; if (temp) { m_firstSortedPosition[p] = n; n += temp; } } #endif MarkSuffixAsSorted(m_sourceLength, m_nextSortedSuffixValue); PushNewChainsOntoStack(true); } void MSufSort::ResolveTandemRepeatsNotSortedWithInduction() { unsigned int tandemRepeatLength = m_suffixMatchLength - 1; unsigned int startOfFinalList = END_OF_CHAIN; while (m_firstSortedTandemRepeat != END_OF_CHAIN) { unsigned int stopLoopAtIndex = startOfFinalList; m_ISA[m_lastSortedTandemRepeat] = startOfFinalList; startOfFinalList = m_firstSortedTandemRepeat; unsigned int suffixIndex = m_firstSortedTandemRepeat; m_firstSortedTandemRepeat = END_OF_CHAIN; while (suffixIndex != stopLoopAtIndex) { if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = (suffixIndex - tandemRepeatLength); else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = (suffixIndex - tandemRepeatLength)); } suffixIndex = m_ISA[suffixIndex]; } } m_tandemRepeatDepth--; if (!m_tandemRepeatDepth) { while (startOfFinalList != END_OF_CHAIN) { unsigned int next = m_ISA[startOfFinalList]; MarkSuffixAsSorted(startOfFinalList, m_nextSortedSuffixValue); startOfFinalList = next; } } else { m_firstSortedTandemRepeat = startOfFinalList; } } unsigned int MSufSort::ISA(unsigned int index) { return (m_ISA[index] & 0x3fffffff); } int MSufSort::CompareStrings(SYMBOL_TYPE * stringA, SYMBOL_TYPE * stringB, int len) { #ifdef SORT_16_BIT_SYMBOLS while (len) { unsigned short valA = ENDIAN_SWAP_16(stringA[0]); unsigned short valB = ENDIAN_SWAP_16(stringB[0]); if (valA > valB) return 1; if (valA < 
valB) return -1; stringA++; stringB++; len--; } #else while (len) { if (stringA[0] > stringB[0]) return 1; if (stringA[0] < stringB[0]) return -1; stringA++; stringB++; len--; } #endif return 0; } bool MSufSort::VerifySort() { //printf("\n\nVerifying sort\n\n"); bool error = false; int progressMax = m_sourceLength; int progressValue = 0; int progressUpdateStep = progressMax / 100; int nextProgressUpdate = 1; unsigned int * suffixArray = new unsigned int[m_sourceLength]; for (unsigned int i = 0; ((!error) && (i < m_sourceLength)); i++) { if (!(m_ISA[i] & 0x80000000)) error = true; unsigned int n = (m_ISA[i] & 0x3fffffff) - 1; suffixArray[n] = i; } // all ok so far. // now compare the suffixes in lexicographically sorted order to confirm the sort was good. for (unsigned int suffixIndex = 0; ((!error) && (suffixIndex < (m_sourceLength - 1))); suffixIndex++) { if (++progressValue == nextProgressUpdate) { nextProgressUpdate += progressUpdateStep; //printf("Verify sort: %.2f%% complete%c", ((double)progressValue / progressMax) * 100, 13); } SYMBOL_TYPE * ptrA = &m_source[suffixArray[suffixIndex]]; SYMBOL_TYPE * ptrB = &m_source[suffixArray[suffixIndex + 1]]; int maxLen = (ptrA < ptrB) ? m_sourceLength - (ptrB - m_source) : m_sourceLength - (ptrA - m_source); int c = CompareStrings(ptrA, ptrB, maxLen); if (c > 0) error = true; else if ((c == 0) && (ptrB > ptrA)) error = true; } //printf(" %c", 13); delete [] suffixArray; return !error; } kernlab/src/cweight.h0000644000176000001440000000322312651720731014316 0ustar ripleyusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ConstantWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 12 Oct 2006 #ifndef CWEIGHT_H #define CWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' Constant weight class class ConstantWeight : public I_WeightFactory { public: /// Constructor ConstantWeight(){} /// Destructor virtual ~ConstantWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/NAMESPACE0000644000176000001440000000527512547246130013154 0ustar ripleyusersuseDynLib("kernlab") import("methods") importFrom("stats", "coef", "delete.response", "fitted", "kmeans", "median", "model.extract", "model.matrix", "na.action", "na.omit", "predict", "quantile", "rnorm", "runif", "sd", "terms", "var") importFrom("graphics", "axis", "filled.contour", "plot", "points", "title") importFrom("grDevices", "hcl") export( ## kernel functions "rbfdot", "laplacedot", "besseldot", "polydot", "tanhdot", "vanilladot", "anovadot", "splinedot", "stringdot", "kernelMatrix", "kernelMult", "kernelPol", "kernelFast", "as.kernelMatrix", ## High level functions "kmmd", "kpca", "kcca", "kha", "specc", "kkmeans", "ksvm", "rvm", "gausspr", "ranking", "csi", "lssvm", "kqr", ## Utility functions "ipop", "inchol", "couple", "sigest", ## Accessor functions ## VM "type", "prior", "alpha", "alphaindex", "kernelf", "kpar", "param", "scaling", "xmatrix", "ymatrix", "lev", "kcall", "error", "cross", "SVindex", "nSV", "RVindex", "prob.model", "b", "obj", ## kpca "rotated", "eig", "pcv", ## ipop "primal", "dual", "how", ## kcca "kcor", "xcoef", "ycoef", ## "xvar", ## "yvar", ## specc "size", "centers", "withinss", ## rvm "mlike", "nvar", ## ranking "convergence", "edgegraph", ## onlearn "onlearn", "inlearn", "buffer", "rho", ## kfa "kfa", ## inc.chol "pivots", "diagresidues", "maxresiduals", ## csi "R", "Q", "truegain", "predgain", ## kmmd "H0", "AsympH0", "Radbound", "Asymbound", "mmdstats" ) exportMethods("coef", "fitted", "plot", "predict", "show") exportClasses("ksvm", "kmmd", "rvm", "ipop", "gausspr", "lssvm", "kpca", "kha", "kcca", "kernel", "rbfkernel", "laplacekernel", "besselkernel", "tanhkernel", "polykernel","fourierkernel", "vanillakernel", "anovakernel", "splinekernel", "stringkernel", "specc", "ranking", "inchol", "onlearn", "kfa", "csi","kqr", "kernelMatrix","kfunction") kernlab/data/0000755000176000001440000000000012560371302012630 5ustar ripleyuserskernlab/data/spirals.rda0000644000176000001440000001115412560430722015001 0ustar ripleyusers]xy4n dl!)eR[JQI%)PJ E$Dٮ`XfA }{kmno b/B  "]H|%e묻bsddYN˹@}zbE l; :]>D@'y2P4VJR BCdc>[x SCe ?_Nr2v>"5<D=̙ ̶J[S5Z#y>mbۅ6>/o9Q Ē/RQ8Z8p,׿r}3z@ͺ{T䟈X%?_GjG.glqr#sPm~Udud<ݜj;oUn_gاCټD[(VX5Io ʱɿ&1u6 1ƪBc.Yc]k#4zFDLaUSLv5>g9ojRkl& }@W0s8㜽.0WC=KREW3k;XYbI)%N|x/h5_Ae ddY].?{n5h'Ċ<\(O}jڂS@}.vjY2bg9{Dnv c8%u\J&R C:JoP@l;Hk} |lu?or UgHXP9ǟ _ ]HWFQևgunv߼hҗ^oe`oSrz|x-x.xE"v;(uIٗD$a({«`bP)0# g5UzYCn7BIbC¿7vpucḕ8y_73  m> m,xci5A*.^G+K;O4doewU]GIiϨYP)/9=ǘ+0&h߃~ꋳԟ0r䠷̟KXLճ0/9 [x`ӯ|P{{w>wL<3Rjƛ0=HLlU[ R0K lbhW--bOarS3ޣTSR >]8GVهmgRMcweK̨Hܹ z:5HfI~owSP#ƎeB)saѨg .yVw&~>ai+tauop&1*=.3b/N㘡{O (v0A(^/d( <{,wB,L,wӹ0lwDG,*9os4]w2€D;7BKT#9zcnwAG[׺FH`ߎWKcGZ4&"o="?N򡦐>>O ׶(Ej7&"LX4N`.C>`QΤ%~G4n‘7F* KYHԓ8)Sͱ@0˕0l 4< 5/.js*3C*@2H̫mYׇv"tIkze{ƇC7d oqF$>/߾7ideSw]=qld6%<ŤYg)`YŴe 
,6/| 㛓=AIBہFK߽9[)kqӡ[fF|wZyBFw6dMZ{L4/Bq8fNq OqoA}ޝ8iS+ 'SrH+`";&LIdk>>pH`+9 d&"2w>|O*|u97:AZAJ\ܘEOàXwѳn߸eA]ǻ8#9)4zD:n4ǗP~`abf UiMr_lRF3U SQwO°PXxE>t1h ԛ56R{F臀'V[~H!cyD?};X}(Rۉ}>KۙǗ >dhAWaM +FCD $ qn{ t^gBf0])z>TNs DE)#FUQ{U'mEf,v/PH=n֚7Z@jǟniRji7P Cǻ)0Hߘԣ}6=w.K(3,MM=X~wF-Ah]Q3"ʋxl:P]e⏌BSpxmp{tcJzC{y }[)%܀966uޥ[ Gح˽z۔]/y_)zlM68XF#̒$pXBMgrW}YֆW sB` aNu+BF~2i '7#9y8uhv2~^KC^"VaȒC\Y]Z8P5 aLL8+?C6"ณ\0j1B6$],u+^5< Nw\(4e㗾q}5ȓYQ?9$Y\xP >y JiS!qC7CM2:LJ|a*7>0&::qDFM(:_jBwjiHv-Dq?q89!$ E8'kGZ6/HAG 0IT`w[VkJlNqZREUU| voWB^׳Bِz8#0s5%٭|{ m,]T)4|)c/agO{m U7s=rLeBWEؚP f&-ݷrVxp"F Ṹ@fIbwOiv34.@V!e zuZwaHs!#27׮a,T9+#t On\ĩL"Z ;{KFo_W#g(k6bMLl;6 kol:mKK6߄z?6{?`gz8j1rY8u6"a4!.'?k=.pTmE>).0uq2Vl&0k770qa5 FUBˑU2('^O%KXI_]Z"4F^{N+Ģw~M<| < =xWK]q7k TwA /Zt$͛bڧC9#(n8{g##u S~1TR~-Z X Yؗi`U{pP6EcSMb"a0mzd9 b/Zt\=03ԑ^ɴۭ{ ?u`+|9 dC qȞ>Yky"4ȳ{n"y (T>3w0zK༛"0"alckernlab/data/reuters.rda0000644000176000001440000003773012560430721015024 0ustar ripleyusers}ݎYrhf%Yi0;8A P]'bf9MR잙]AUu*2kE|;`_XO ;/KMYN壅k,mNT=z6ˬv ~*Ӥi(YzuV*ͦtyi6m^UUQڣF`*};i[de5J/o Nu/]J6x}.˪Xlt ) H^^tu1ͅ B}ZlFˢ&AD`]ݸ^Pԟ\UisGIYur$T}S>QKTʟuPlU^vH0 mU|BU6-N(滔P/d1S#ʰi.pqIk&yei%VM/G¡Y'ܜB7*v+P"B+KZ%mWԤzA>PoW黥fVj#x7]fOڭ:HKW_agIsM )LUTR>ꌚWm$9*"\wl&"7- Ur#_ODQOu& '*K7T3Gb ?_ɣѭXv4I8dOYfr'v#B+*Rgyi؄&Y`im?|%KA4Nu-F~W-ir^d!mWxwqbl~_\]ld魦&B]pR}0석}Qɏp߹)$6Oo|щVɤ安=[ӍhĮn?/Mv[`,PU E:5nnSvO!Wٓӟ滑QN(lNryuep^RZ{>O8b=!kOhKd 15אEZ.IzW],|O`-\ZVUi=‘\(_faQ䷇mdYUlgnm yZz9 {=e}gɭwge)bЪވS2Dsp*1=887WtF9xzۚak*F~ו8*.^ aRe5];rftzWNY@"-|։ 6"U,1V~ z~wpVG.R." ٘-L^f+ia"88\mұ:ٿ|#xS^SL7yQnlR<w_|Fl{T:]//ꉃegjndV|PdwunW%̱1mPR$z~c5ҥ>%zۚ{۲wbQl VݽW {bp0>T!T2a3ʫr!,73W[~V*3;~?N^$ WVAA^zݛԑO Ѡ[2H&}{[QNňWBÇv[$FpnTEYWE8g('" -ܑ;7nm]rzI߹Ϣ1* yFϠ{E]:[0wVV: :9stBnV?grCôwMV|+f4]"fA3$l- 6Uh;{q:x84Sۀk1GALEU6W,C=A'QhdQ)&N6fMHg}TRB5%&"% gH_`%n]jvYmX"QS*Q^gٳ>/'-?t}dg tMॊ'ǃMa|vj69xʈe)wC\BD,2 @-bFl`"7Y.:%X2S共 ̀QAKSx bAKEN6L,GSߺU}T+歚 UI?FL7ӢZ7“?8y4:IS'# 3Üߎ Ok;y2fHCCx]bS?ZOJBI'aj.m22-?Su]Ȇ|Su_Ǐ=f=Q+ mM^0Td`ݭ*kA5뼔G./ULWqF鳚j\4^#?5o|6'5g6cL Fr)m8O7;kW+/=sCǴӬNƵkcM :ctdQU3Oi)0^Jj>P|2ͫuͦ^)P ז*/𝹔U\ JW&B ehqv8З|T8>OK'6lR! J%=qʰ1 d&F3 0& 2HCd U: XA*sEj/3nG_[rë3ͫۻF)(wb+Ѡ̸m-UFl^0!o|-ox QljM>TFoBFH8ݨQaa;Ԗ}F0 ޤoҍBTa1'&hs⮛LX URK,Xf"'  @DxnT t4?ֆ49e:Ԝ}v{be8Jbn6qV-IGYg [BZ^3X^Xooۂes !#oA VDnJC($ו> H|Pԡ] HZբ7%)}xpZ8'N >0Ȯ %6 { a'3þgz1 S>40*~s.D ԗN\k|N,Wk @awk#mP(lj[ lH.PoZy>vJםEe{ϲi"ca_mSŮ,J?TȩpT&G sN!c#q5]b pfk۹&}+E?"i}g}WHnh8 Z}45 :?!:> 9~Ovi{$\y+ec@5B.BYx2ՕbgEFi8-ˎfy6jsR&'ȑмd_&bp׹H[e7гbfceJ(g.tgqy/ n]i`,`/y0R26$drH$(3OQ`<_+DaɌEPo[RKLI3!; +Q16I]];d%hP+uFD^T}MiUidboRj{׿8.cА%KI&5zףypةrCJi3EVYuEjBaCPuT0H( M:ɝ]1`c'YHrfcο=%or\;|%S\Si.7vKkk+U(,B-8"IRXi$Xptn8 낼Z4{VF1UxiHZl6/EgZZ8Nem{Üw0pi#u)O'e{T{4bܹM(,=w볥xk$뮖y"ꋿy5Cyi5 `0XmMs~[2D%|SGUusx$*ٔ/>|,(* ux`Qd=q95D@ڞ% YЕG+dDf'f|ĕ D ׎Edĥt_[3 я3qgeL{'ܷ/?J^*$Ĥ%^HײΨ&x;4҇jc|릃ݔgGrG6E}F]*!x'|>ԃ#OG'O}fDc]6m7 g"B7r* k*!;J;7ȇ~25C|qdT}d0Tl/m_#!Pz1۟㼅1Ӑ f Vi:ۻ\jkt~x* x ž%Vb+ HFxg_e' P]nTPեψ (+#j53U ߍU1Օ@G(2"-5n!ڒ|@.Y>G\|fp\w}=(T*L,;G|*<>GOg`wbi"(D"#$<# sO!ͺ6 e֟])#'\.7=_Cbxz9`!ߌ KCE%F~qOVa7]9Gy}>^4ևeUU!:x% ptrz::8=dBkyɓщhIQQB}ǧ!tYԭw;x>v~E t Qfn\)Տ>Lz!Yg9s1I>=:~hlA;]۫ruջ񕚗gUpz QQ!")oZZӷ__F1 YCHl*J~cFf7ޏ錾qx7b<lNp>3՘lMh8VtXs- M视(䮱>)AC GAEtR6Q:4xr\!axkb/**xœ`!ٸZ}GEq>5=?ygh',{v}8ԉSmt/y[;-%O9eJYExTh l*q\ Ew:ZX-[!#T '9 !~ gOgG[mFǫzxu])saa\5@$cq. ȁyW7E:Nߣ]JR o'߼Z.K]T3ѣ֬pZ lZ "u*ؿOV W{Q+jz\*`CcM֑pO? 
[*+;<77dM G/Ju}O߾{FK\;"m}4ڪޘRfG>Z[=⏋= XbZ¸eEp/GGf(gFޞ ߿;LGBj嵆9:>pE 8\e"d==pӉ;<]p5DpDADq|;=`xAA/jي:4~SҌ^:|8xV8(am)0)@L>hm\fR`Tj?:F~`8yѱ60F0xpa3(-!_ c^l}G, ƃI;j$4Ć?U zz'?J4(gA"ZO+jumeי.W ȳMwKfk=0$j $kZ4^u+|֓Fz#HpmqmpO2:KD 0fZ1[x/>y<|3h27}^-$VZ$ jiky}rϲ~4#?"?b4tOE[扪Je"FwEHVsX4eI">$ן?{U]4 Pfm$!% >,A~{UT|#wف-곲#SʢdEG邥)_Q{aڒx3σka=vY#Iߎ/(ĵf*`[.u/) Ӑ)G%n1*UTQϪS!M@&W x;5}.2[qNc FNm3yc}t 3 |&ہ&l 3"(Փ<!}Ī^AĄ r2~ A{uUf.xj1SLYUW.CdD1=:)DAهn5|& _VT{#Od|K{/羟7T:Yܥj2LZfseV d3jB2+ځm ʄoGX?Gȥaec~`ow OhYD+MJvPe[0mfǴb9Dr4>Mr y}`'RLZJn`a$Ј}4kf7Y&sn3G48QLK^xijÁL CnSdh[c3:Z)O3tmЬ|ŽIp6{ك=CЍ|Q[]#8AcmB8@kjosU " bpz p|ޓ4Gf1@#LliqTt!d-dVƇ`lovY;aE5Mv{2sˬ 0.a7y])}U䘌lCȀ MFht.w@9Vr(VkxjZ{`C|߿\kZr$u*܁slaJi<C4: f3&UJLIkYiggTe>fZ^4a˪;i^>Y።Nƙ~!,ڶ%JH?%@ *϶L_RWl$M>̙j W4/t]?r/LC~b-i͸`Zak 'o6`h8!Q0yTƐQ ⵉά{SIW]>MMUz٦x?{h#D{%>1̭g<,X;;Nr*ZA7 m8jFd Ѥ73,/vǏN/e? ۝89?ˢ1R ۘps-X-i:""/HaeE q~k[ə&teBթT^*Xu^m] 2Kn}P1䪋x?&)Y.f?ɝRlļE.e2+y/IYD,WWg򹿐qBCݟF4'־k(9;ujl/8)CIjm=7 j$oyrfe"$~--Z;WkVPo ?{M `D0+(,?8<QLXJ&վSq%tY:u("4$4WlgagϏZ&U^kCK)TS+4vA.P'.qJ{Q+N QnFܷ# Dt +9WV~j#݆b2K/! S3c6y~(XNxG՞ k30R^0ۦf@;?ܦEy5O_=TT"kn3}}3  k+D +tK)QT74}V#@]oCZL/K}lg O]ʅfT`l&[=x|~GFB[ud&\$Z1< U^c4ā鬮Ь$܇+ q 6q fSoFCmSs[9h*?\hyB1x9pumSB(-zu(~+oxrg*6FBϥ4]H2,5b9BQ4y#Bs<>0>5ŇdR4l^rBUvhmN}^8sG w:<5G ,S Վ^hI5f7p$o'& *E^ ]V`vݬOt&ޤ@AH6,jj-$p`4%Ca+0; K>(J>`n֑A)0Cf?qD>+FՕXjQT" U&~FET},vQK*k,t<"+tCdm.8.w(+XEfPiitAx|U@&>#|NT 8 ܎%E0JAOmuJbw|ؠzYQd]ih$5D fܤ%ed# inVm>8grp"jPJzjtZw:nݨW͠v"v;4ZyC1Vw棹)G[PҭgA/޷W8*lPtFLw 9ZQ_{Y_jWV#RBA>eDGUCaZ؀()jy`nWLSa3ĩZB4OAϨQGʗʐ–OS(|cquWyxMQMg 4}4<Ш] 'ly|Wjf#H;X: oxã&\ =Hw هx@)bw%eӿOSP|eHx{kk|jӃE-~[КjnNEMUܚl5\~བ-).0 ͳk pޟQc"00'W٠exQVhqme5(ʞ{e8 s&4QcQЦ;r7 pՉ\s߷%]{P$=b'Ƒ?DPiLPm,eAb[QKq-fF;iYV.S6,Q%Ńmz:<2?i4917Ŵ_ޤUOi sfA!zUrtH3o!6(`;'զZ  TF]U4gYQ# V ewV=Eͣ 3akkXjʧ{#!_hrK)f+-9t RJJ"@(M0G[{j,Th>-RhCV~kգIBvu\]hAo;@Uٲٮ_Sȟn/]PG웥,QpE H$mԴ=E΍KZ#ɨxVӎ0t)cؘnkzݽ~$oc3=u>V4~:]Ai4SnIxBQF/ʷgML{݄ވc|)5b;U,H<Ъ=I΍'FGn Q\W6t#@/StGx[-$Z)`nI}Q~Zo4o$5P=#PjXIr>CGB鷏=ל |?.5y(iqB<$Cj==zra֏>ƧǏ}p]GW>;I3|׽$joh>բq7|nכt3Jde b h85`jm6.t|`Թeծ\jѷĝ~"E6*e:C+}=er6u٧,LPY}'O^W>>lf?iL+C'_c| _IȵD4PY"o<ѩZ鐼&pƁpy5s7m+6E61l[>(kGllm'M,ly8AU[S4_1Ԟ22N xtjpwew{Ү[LFXlvIeH$FF+㍉hQ[-  < ۮZm=LySvnx;HvhϬꙂh0ku2xIx؄czLzs]eOONOi%rR:X<4Vt+Z$[\Z6 "2F+}p3J}24  9aٍofMմcb3ӵ]۾Laq;ͦeĴYf$2LZ( r{ڳ 5k!ڷrg:?DZKH >c;^٫Yy @qt] uD@kl^*陮SofGJle ½ŰI?*k}z" ?Pj.Ooke*`fM@x7zg-nVBAoǜ!B9υˍN2Į0=Par[ݷN֊ _UxډlML;#}}Go\slgX\WH#wB.n7EZ:jO\ha?҉,E^D/&L|dp?3]N]f vy`Q1&0=W{",-orrl_g"lO ?BХo&=ķ'o- ^g_7TDqUQt ܓf7z%ǟz5_/meD}$ܥxHu]3LQ fU  |HImÞĪes," IuF6ǝڛ;xDDjv*a,+7b5Kӝ1K|W[8P`2| ̊ 1i3ΞGhtHZQr9PI *9մ8k{{e9ho޽n^To.=Xc&&O~cΗ_=wxe=huL"e6iYu`j;k-&(a*X_hC}=m] rдjaYyok f64AʢZУ1EmDZ77|g׌~ߘwh.އn8Vq aGeG y9Pӵ4۶Ovثi׆Ig6s"YkE1mX]<,Jl0`'+j(j@ϲ%2##P;0?-h9BB>˿^!@BK2]9-aO,&b6 a:FGx>o=/@UQi| 8y.(grF =cia6-Zf0\y+sy̛ 9?P<_'|tǓN7f^R?# 1$cI6 >|ۃuӯ܌5d ˽}u?lݧٺ/[{ a?>0ٙ,~1`r[;S[;6v_`FZ@zc|ZCڎM$w{*ϵ&Ck|kmD+4*Ivu#Vrݬh>ȚtGM+7ҏ73[@ Xv%Jd`Yi^LVvl ,OF‹WFQU9}*d;UЋ- Jot9pqFV2gb $Z+o&a2Wk_y^cʎyTPA}Zvm Ckj`'S 3M16?۵`:]OeȄ@ j&Q^/޸T4b%R0SE^Y'u+O"gַ]d$ZG=`4|;y= Nt~: I\H2^9R]?WO}cSZ`H2_i(>=FP3J߱(GPcQRG$@xoaimHg@H/M}!A΅^Nq1GĦ'+(lm1mz+a\DNmqsbK.aCq?b` ytnR)iKs~M(rD']faL{dPneأvoyP@A46-CmČ l8T&$_ddzLw_:tQEy(S,&1f{q3,}_ B y=a\DBnB't ̓ZGqP 01K$9I7P-qBo*Ǐ%# $%~k1 cm hC4),f̐ZDè!.':Z%&F!;óN|N`hMT iM:ɒu=w!>ZEAX~TMONvpiVp;+X;p&UL!E_kmOt,G= ]Y>VhweH_Q#[i͎-[YxnT:?6F. 
9x4?ҧz!]r0u0کǗ 4R"-N' s5T5:γuS6" ɏ/CC+ξZ̚]aUu:{sR=/M59=\pxQZGrC@ ۖ|&*>_XeHFiLaJbgmi8@~~(&&Xm6D/ _ $6,=?ޙyw:\m,lՈ5*۔]/*I/\ÜI$z~XVDXp1}y9U­66RU3㢨&3""{AJ}{fW3p(=8[r$ywee-m:; (u2dݨl `|x Ǐ1ַOFoih'*&{cf9'a ]yfMphwX;o}37BćI?2O"[!5??D&S_oʦ؏LO_ o9Tkernlab/data/income.rda0000644000176000001440000012266012560430720014601 0ustar ripleyusers7zXZi"6!X[ q])TW"nRʟ ,bA5,To3aNq'1Be$QJ^texDލ52̥hPi礦ڹu]1z/s2)3[K1Q\g;w#9 u_iUi]:TKxeBV6іWJ <{צp;Ų5h~F&M^FJ׊_"5غ"QmxQ; U$s1.\mG5â`qV2 7&Y`s%܌ ?J[u,R+ %:+u|zw V>2XV%b M\&y Gy_}wZ Y#I*U),]Z!eK@WJ!xg/QDg~tioke hw [#eP]kJX1`l`r;E dz Ym&7 Đ@64)=2!y?΃: JtcMY :AWUW]H (?}7tiMavN6% jA0k$]*_Z^\7@1nFt6w?x"O~Y_ADXS!%H<@3h% b8UI ʢN3s73 jmxP=HJ$ϭfTɷ=r8PnXl̀ A6:{By6k<=aO\8PLhl>Œ'plݯ., U:7nb1M<26DŽV ]5(g!gsQIux\iPvJbvm%ݡӁPl "_5.2Ҍрg];$n8}ؿscb6 i` K(~6 \`GaD J[## e!of 4:dڧɮPK㙁oeÓ Y C(J $P@w2%O\BӃhb0zf~BMk9|P%fL/hAB"1GG0I E{D.: #9VeI7beҗpIK1ql@g/LX2!.r40$5do3:u$tZRl(1p)VD?LQm܅RKj(SOk2gl kv}}([{=[\3B]-5w(+d6'J4Ѳ@SqoZmvH!<;Ѫa++#ڴBuݗZk/$;#G蹑/ϢIv}V\6.FK@^Yj/&H5)H^Z'&|V(!b59]\S0|dc"nFfsݪs$Ia:glE0ӵN^! F+TƱő@鮽_j_?r.i,,b(GBҘhQQZ2+y PL:Nѓ̀*=[(EZWT̍hAUuۓ.X'J{!F1+Z*Ih/wD㿧f@vbȆz0c2'\He#bۧӪ+]kLԠAh^ 537?5m ݠcNt >D}|˂D\hϧ r70߆E`hRm7]1 ߡDu[sE7ayd?aA sPpkZNvE b?l^z)FgO] ^RN27;"=D"5DrpSx -$NWpR0t#ټ8(N|kz wX\1(Ml`quSw鯺1 7 M<キ}ǔ9[frI8z z/J ԧ83m^3}Ώh %v I>\f+ERx>~6ˊ+2wDGK/~(=BzDg->hz#:E373lAD{('\.ĦX*aϪ"ixӝ`)#pa!b =ɳ՛asTD6{9ir僢IQ;7.V_G;onߞ37{'6޳W8{M9č:8>qI"wFfe/e{`#5,hT"qz&S ,e,\$P37ET&$E>P ajTeO?#b(aB&WT.܂B@ MK_QM:rVQ?|`d\ׂӽRaaj.5)nxRȆoFVZj80SA[\4uuma/ yj7=%:#PZF6/\ JVJx9V§Q_C4$=?f_2}Bt]ik r/VK"QES NIpU Yk]0EG@$i' mL@~o7o8Դ_+i zm$Ʈ=VBL{p\| N8(fr_ #ۊI|!er14#خK!s`p?[T|:hae%BcA|jHtaR,zhW£YX;N7SJdVS5U9ިJT{&):;·Y7o~s5| Zus_U %ǗN9+[ ulE \&Zd*䡪)6.DTȦ(8cbR`Q|hͅîeG˹.O|iwMG%ra=`mivƎ~I/H 6YY[oz_\ѬjJCh!Xc'_\1U#%.iγ@h $xXu骎py2:TUr"JyN 4{%K)g!Px1|TX]G1(bvguTF/TW8)Ew`UD~|0sڞP%R贕6HXi0qeI{SQ7ØJ ǠNY %g*T̯ fx267 &EX?!xnvFq,r#6̥|8emQ؁~<96ת*רUyC#FCNUmna=ܩd3Xӽο6 eX]ם 4xVau0gOln)n~LSmK8D?'Hs)8"-$)7eIE @SU!u/ūVLx@As~_n:GYþD?ɠ~~54KGYw`~Ŷ~L0Y)R=dG]z?KC"QFyo$"dَsWcl u>;I séO@^G͝aEn2^gO)Z}lLp\4sQz+w]92^>u K >2D~e,X:) ҙ^¡͝gO|vs3뻵>9~)\ G6![V]E?.g/MoYDE?#pX^KzaDu}G\l'=%(>jq*Q| 0_e d">X8M.|e\5y]yklh;[ `/240Ki  @.v*q3+avvy 32TG[[QS+`iMƧ -;+f$~c=q5PJ|Q20r# ЙkL \W"5e\OZ*z}$ + ƽ37QVi#Z Uevy0Xmצ0{Vde `$R OVҎt DÒhjL¤-90X|@ +zP/g 0 8 Tz8QSl~*tzy6>בdkde2%@sONN_0x)@sy1F%+ qb4ϘW3طtx>cZFu!DGAW^RXts˵DLŸON9)?cŚF-jGz>:Z]w'"T`3 iB߂v {mRP%l' 3T]s$5-o0Q[)Dsc׹r$*̟ dP쾪LL\;D9qgұk] ,%E}йjmNQIwԸ[-G mWv;VvTH/NLx.B{E;9Ddly]za&x>b`'62`KӾNh"O@%$y0ZLծM-v H/㒃{6EߟksFAoYu˚AZ| WW}Ya0 5vuUƘգ6A!ƒ<~49hj[;D<SFo6vz 0 i2MtG5:27&rO~ϛqf1o9'SPs1zx{ڛTcReЋe` &./NeMDYo@\|ڑWEUG2+}v~b\iO1Z|ZPcq ɺj)!X潰vR5l%ʹp0T;M 38tvM1)%TsF %JfNggϒ[_xaΗVAhsV"5@HAl߹ΉY](bNI[)ј6|0re4@v7FQRcG=Z%; 9ߊe2B˻[m;w9U*2wɦ;Tl y&箊φ*raK/I!md 9/=6ff~P &\ zFnBv]c1!\wg-t_ dޝi'ghpw AHѻLrmEHM:3o3N,Z#%53JlZZmG * r:HW\ihIZW^7,8xJ.4c9#VIHSg 21I[kːi"3Lxt%L > w*5u!_RIz@j62B SY' \OڡSw`ww5@ߥX*3ZaԂU13PfYQy \>j:H57{.%kb6hubX}M!ލozֶj X}ʼnbÞOCCLX%su B{cDX, <mbgwLI468߽l*d 5E0Ϛ&ø)q@7 @JQgsսR\2Su',n.8不B[ @MabjlXq}9`BJ+ =GsK4k*srhH:}I*BM/G/c2:^0"9(RhP,#lvdDV3C AB<ҝ9sk@ɾz3{4JN?pܼvl-HK!y!4rz`;ΪJ'U#wdbQc kT/iDfӻS-tm2Fr *` ۣl=v5r1v <d "W`NbLB:;~N3# eгEzt-C|k1rweUaQU71 .|7MQ 1ŰkHۦc$k X'iXs.V+;AY`{3LOٟZ~!A²u;D8M0,C=ae|8{cO nCɦ%sm] 3kd yOBV$Ni`U Yre C#(CaJyc%xm֨ f;!RRLN <3 N;e2B, /lL"zR69ɚRIN9X JX] ^@((JeVœwZh=\3e?}R+'bV"#!Ca%w:zUL(|0@%Ϯr-M 鰰h{p_WTkPZzKMr> E-h ww"! 
kernlab/data/spam.rda0000644000176000001440000032231312560430722014266 0ustar ripleyusers
[binary data omitted: bzip2-compressed R data file]
kernlab/data/promotergene.rda0000644000176000001440000000433312560430721016032 0ustar ripleyusers
[binary data omitted: bzip2-compressed R data file]
{p jRZTM.Rfmn$\0o`~goGthtΎ-*|7 .*W}">,بޏtd>36WEd+eZ?Dj-6|CpH5@TiYӟLD ^Ȁ~fܩ>^ -qڒMA\:qb`?'JdVz?da-]rKsΗ%Ekҟ-fE+ ӓ Uqڏo*+ J' 78F}AҖVD28<\l; jIuѠX!1jucĢ wm[l/(IJV+w֝-^ t6Z f0 ϲ!UfE'ÀBӆrT8xnNwi$ڕ$6M >Ir8r B]IKD]ePԼVxÑa o;7m*qwm*G?>-/ m7|c,):`YjiI@jóp7E^\\X$BSط<%eK־/OђIkC-uUY;T=GS,eWc?Tہc\blQkWHuhw[%σO+ySq%g\%N^N2)LBXH~lzĵ!Bȵ.Z 8^'ZIXVC^}.Srklq8Q~ ;s{[˺$䮎br-{8K"n!˅O&q,0o/z7"̐N2"2.T4cfSS` cV[(#X/ʮp0q(K0uF,ȶbl6K#r\r4 H cHJx\&kcqrΐ>$j!uEW RlVO #WNǒrx&wE /ӭ9T -)XyfF~>`۷a\k 'gkM +YѸ6rՓOyzuR=dLy8N&@2TQQۄG`!ذ>vF'tУ&i:.Ёm*YȽ{>RVb=_s3+7@oe0QbH*/?ԙ^sϝVQ{ i.MV>sOPqzv21F׿;~h(ZMTײfkm]c~W#j{>Z=$d~E:UKy':̇z Tسǩ[>Q,OE萺g)jHȵy%0,6=䀪 h eK+.@#gwEoG S&@`Cv9Lq~.թT0wdrQ݃E5Db?Jػ7. I ]PQ-ڭ?|3k{FX~YA /(h֫ȱR $S\W kSǔuMy (nr a&؅j萪3Sxun?5kD" yfPXo!( 4yBӬu.30܀R{jq4E2V &AG Q{p^~;7zyt1.'m57]-DK'o1suNb^wHwQ79YLwey"e3îBBў]WJ͝pyϜ'0 YZkernlab/data/ticdata.rda0000644000176000001440000057417412560430724014757 0ustar ripleyusers7zXZi"6!X])TW"nRʟ ,bA54Sw&*finSz3U>R4+ :'1$HVMAJ@$XuE_`th EDz`$zgA zz[ M$sQM}F$S,C򊛒@o1N= 0XT/c&8H"E#qYO&>> %wv#] HGgReVQE#Ig:H '+ c*ZS*=+&Kj5)"q*QorufֆfR`D^aUb}XFK!wT43/vܑT1ҽ9m?jj70Z+t+}ؿHQvZeBE%=B8V \& [@UWo4N AGy͍y!^ڣpewO O&n׹cnM(J7d+Ju3QTt=p)~Gt<|[bqf˕.0]u8CЂzAojϸB yByvLMl7LrS6ĆWn /{Edv'нY21Hhmī$tn쬬P0?=j[!Cx2/n3C4ZS^}Tyw{ݪJo\8p WFXTٷɔ.94fD~a%QRq6)@&s<峿S}vP>%O{M!)N]p-3@顛lplu'ev9F}Ru-y>&(]芵-m:| V?ABw"Nje<n[gԅS׻ij|ӗ }v&OW唱;iRT¯ D]8#T 2qK}aE1Hg߀zh2000nwat3I:<ԓ݀yLWk>VQ1dd+\B|erxUvr |x24/1V豝cU\(҃o'#%qьk9SL+Ν,0 ^P[unsdב- ( `>u[`UX z͘3A"W`Kn`dTwiZxCb쵽RN&T͝6cʶ(-/M%1AK.Y;P\&z*c夢6ep@\3ărS^oJ >Z|^.P&gLyy:.9w ]'f |TS⪶__|J#mQ;\uO>yZ׍ں?!usxi(M(gt.s仁.F׊PXy蘄vXa7HGX-ȿ yy$A(K@{tD_["t eaj1@ǂ7zӜX}\_u8ߢfV)H6%E0Ԅs؅Ys4}GR~@zGya4o!K3-(A[ҙϗe+HdZ}R%aüqr+Dbҟ<|x ǤOgZ{ ݇ʰA0|jؖr$"e~&毻8% F)'S޳K &=3\`f\'2m:2_[@*}?9Vj;QhBkjeF" BIQ&:busROm6zN k8hd4h/kHn=ݲSQStя˶eBס{P_t1i-koaSpsmy(g쀸SВ5>i6= 8;TEr,z?iWje 3&ע&ffܫ#.p q]`Bg9vӎo2ǗXFn޺7k!#K,aM݇(v#W$3Vkgu?<4!t Ba{ctqEӴc.tUTBA O|m%/ǚ ޷ޕc fLf=Z0 nnX]fe tdpBFb .Wqn#8-c~R>|uB%HaAbJzhnxapH =CRW/\c3_kjd[C:.qCNX/B:Q c4츄Eo8pX۟Lx^֙Z&}ClqFab/Nϊ?]By#wTwli8Y^*áS@[]@o4tM1D jwnRq螂ndkj7dR;ҧ*)?n(K_LlC> 1X>񵙱O:˞;v#0Vkb7iA+?an?F'J։ᫌ&qʉ2,(\xidd'S"R1a--C.\7+߉jޱ*^- ]fX-&_?:?8U_Oc9Ey,DcD]aTؕ7ma 8.\a'3<"^:s',*b G(9Z*s ސ_aՀ71I5`rntV3mqz(F|iX;4}p6$(PY#滍Sҡ/9d]=+ 8!qXO1hIUǫ-K\o΀ qRk*1)?0 hWLyʏ8:)?@+]CsEYYP f(50窷c;-#G`Lo/4l@ǯqN7(ӧ|J R 끕M'G)@k6x^Uj;+{ǬxR-knZbg ڕ &O[ ~tvHBHh]ENvE= 7{3b-[ ԭCa\ ֧ȧ5|ٮknUCΘOc[ 2\ĕ Y9fWAEs$*' 0% /OȟE{&N:4{k~:l3OmPVO24QczA_dK>\M eDܪI#m)2Ek䇇m(,SVq9mV Afoho=7BGx/0?g<ƒPf% ƛ'J5r\PϟxVS,6jِ#CJY=D~g~1q *]HؕGDqwl ĶhbJέШ6 -6FJT]cp2FUTi6ܲ>lJ^K{ێd#1S2"u.(A(s};Ok'_3UZZuyEv3*z驾<0CP2G`:\m2ҕY8I5&1}71@->-ZoUjE,c\_ ּo,=C/_ SEUzb 1l8oTa~$ ºTp!U Z bZ kU3t#u`zD,pl#@)R7gRqBTTM3KAeIvyCN2m8Ȣ&q^7H@<Zvclg34H[<؅=~U2C,Z %~WVRD?750rg@5-& Mƛ$nUٿKkCcšʓ;^WIuE1@d[ w™޷ڹ@nOءѣ]aYlڊkFڨ85P&!b4Z /EjIrG>fT'K "y@LTXڳw\ݒ1rU9i᫴{ƺ8w ޙ,9Kj-3,eh( U7!_CZNl¸RVM#/rm;>j+2gk,f; $Yθ"xC4G(Ŵ GCcT xS1#xhr{u&*rltw M&h0,"8Lx.,߯"3/ډ@WbULCIdtA8vmFe*BQv|%?F | vh~C}mC #c$_Bz b֥;=@C(E'S6ljmxDIeas?wqQ?u5 q~c*9_8[Fܳc 5lj[“30w,62O?'t܂ژ+ÐlDBʤ1pv2KniGv(> _<i`9ʹ Q("M.cly\)^7"`[zxl fQ?{R\O&==9:m> FzdHǬ1Rdw1u^=+e8:Pt@=Jly#!0.n uwrGCDt_(35#C>qۧNn&kj Oac{_&%O1?@7x_c ἁ <`}U ěõCƞҺU:+QZwoKir6A+(r$GEq\I!l|D[8AkSB iPu% ]"cau1uW@Sq|w)8 =:GIƜmT6x!65yߨicD[! lK7"iX,'=ySep !ۯbV5uIS_M(.yoNCQHYWZ_AUnoY!z<,Hz>aBlCoa:2E> 1R2L#yQرcW=qh~!.첡U%QXgL6;)Q2>j|ytU6pB~o!d!zfQt᜻W% UIإ? %EX:ϺġcER6@h0 /2ȩl{_GrH8ز^5}{ XzewT:#ApwD<2p#xh?f&Psq/=sAM? x>7EQ! 
}6ӊ$$o0lE9$~zֈ[:эO?̲e`Ď\_ATs8ֈUx>}z >&qm%G0F`.dmTh:}}QIQJO}dU]l |?!"彯O109%g:wb״GhOͨ\Ǎ)0F=JR -cwxw翆ŁeO a$%O%!;[D9F fVS9fY ͌q#~D)x;onK\xo#$3\;|4woٻ[=!n;E?f2@qƖ&aW1fbLs&X6g>?]#,濁A4|^+LB{}#X|J%ݴx[Åz.S ^/l4CI>oN~⿖.Gc ޯ⍾?D=ʔrlq 𹮞d [lX9<|0S\|ZKD_J1B:j^x)} &T҅K>c!45mt? @O`7raFeuPb Ձϧ_kUyhE,)KW_|J0A 2zh-d 9@]" RW^/ \Xn>8]K)O8H}O@fIɒ*ՁtTZ])/[4CNmT$A/^FgoF_ `sdS#hwVœ(y>3`l7% T\z3|ş۳,'[t0!gɇ{ʢ5 |fGza"W.oiNt8cT"%r}kMw1];P۞?t(u:93 L^DUL`3G@%gYgO<%/($8[e}ѠV3sl`LT_55BDؠo-(7S6'9(wi_xuզYt_nQ77'wu[vJf4X>Fu cb%Lf֊[yGiU9OPu?>/vA<(jn^ɖ+/$3=3arpY) 4@Q60α2 (MĚ e䝇 &!<gIs]ݜ=E-o.eXxCIPm,\^(nV_sg.\$?y#TA~eT'd:ZF#{qr'[9w|}Vߞ^֒\.ݖ:dU8-EY )06^ͣUR 7 X@k$ѸT}i:@^؎sAw&jxiN:]nv+V r^.Uq6V: DDI֬'j>!%smg6gVwۂJznٟGNN* LD8( ^nKS; Duq<߮< x bk;`CuB\f,WIXɍ]߯J7;eLaS£3еUV%崨݇S@bYE M6z` Ф"5ИTi۠rpn,c K\\έ #Jc"6 5 ,)eGtǦOڢ1 Й+ -@=BWv͌ɽl3d%/Xߕ%ZbԦ\3IWdr>e0̰G}uL8{z[m2t:kk1e{haa$5HFfԝm2;Mb`q((u JobG̀7`_{UEh+)\*J- 1G$:6])e]2s l}2Hé֣WfلiJ9eP.fy%4Q,aC7@֠q"F-cܽJ24w^[bZ |3Y!.~C9I`@[by" CKWK Q//MZ.TUZ@M F!r>)@ ~:-sWh` ڗSUoUY7 "lpRTtfe i@.j&4 ?P$ZsEe䆴$p"N'6eo8պ7 n3`vVMxg5?Ji840YzUK)}>DșRo~W[v~"E4*7/,Os*}rr隐+$@Ϙ/frs>Bc͚ ]g~߇Hbw<)ԅ6WVF+|Kկ.BK2ZHl͗8bNek |l!5S^"=Vjz(~+H߫V,VW2o"⑨sTzfGUZ> W#hӔ])P3Li2ZrT*NOeN"FzdŁ@߭Pe<ӧ^v\RpfU]9tݍrеj0~eF5=VEGB-hʼnýhI>SL*ξZʼn%'"ToJŢqW5iOy4lHc*Ec^-ˈ =Né@&.b+#Rgf'z^P^ɘGSLdVw>JEA}W W{_+\##Ǭ10A2\'AY95Zh[gBKټV3]Cp@6h _.=DV|Q6@d<b^Ybҹq.NSQ*=ئ9Z'o8u_sfν W_BNd덝ѿ !P7j &#%~^L+U_/& F8O:;>(޾peϑ4B%LN(^m~~,z½#xWLG`_d:%e>o,%{,hj .`gL[5'B(,G@194%gHYP4؉MzI ,k@Qq.`zmoZ[Q۽9̋ ێ_2ػkiǹUv欧z ;RTNx0sm5bաmAOYutEZGLi&MZ9*a`nu3ki0w-ˋ l0-qehijƤC0GPcйP"rLǒ'9m(?A-נmscޯƝn:m,akC D0܀jɁEOKbq _w#xzAKƈڂլZP>*&QH#TJFQk^w-هτRЅ;N%SDߣcV`af7AcYxa~ 荃Mޜ%}/;MtD]u 4Ƙ^!(`P Ir؎oU1RC`gynQrIy C~xb`ޓKWZ$E,/" tTVMz*a$msI\IJbCz+ЫHhD), xbrw2 SH)p[0) qiK( +*iL3^Еp[?þL@N=%A14Z:N #)`jOQ+߉~F njȕ cD5n_ڋe?ȮE?=~_wR#q)ٖ~gY)ԔnHSeB|'2j5>pK8̗lvt`y2eiR?-O&o!"\EFra}s|P3y/ҷDk sFE$ Mlצa1145H DFJAb`lWO{%Xa1H?_8@3Ou'['vPąWx3?}_)Yڞ*twD+pW&N}(OA"8[*O5d}\u2,LGrs#L] r>N~\j騔1RK1ӟK2 Tf<Jv۲5]¯iq£{M#c՚zk':Rmr뎃nx<԰aZej Y¬@RoLe~@gd 77`+"e\@Z[ykm lʩ*4A5ɞ5MJ=M 6muL 1cw2-2d{pϏS?&L.5do!PDhS0POU vkm?G 5.wc=MeZ.FQw+*Pϒ$λ-BY#)N_A,h<^+*CQcB|c!(1AvKh ˅z!^tu2bO氣IObM3'!taM'#]ُ;5bC{vm(d1#jߗCQ|v>7[,;"}|hT nwi0ʡ^Rt4s4]?#G=Eh@-Cl`ApmL*79GаæƏ5/%]Wdl:K#@$65l+Ɏa~vJdA3ŀcLŬsS"3TwG)ZUs.]y:0w&ZN[F̐ 9O4l٧Rj=krT&0u]$$d IZt &㩑Hc̮IľLOQ ׅ ۩)Ni![U㖆S"";6 :t5 r> E7F N@@b:^!T$ gခsҕs u$g.~RJ9PC*~rNk92}9z4cOA (" ע``mK4?YoczGt]M\zG]DQm*\xFinS-<^.܃0zD{ sm]A ;'0Ͼx?L"NiyqD=!%1nouyI~Fzy42N9†A:*7M/ܞJp [ b 6Nk/Jds%O }\kz$}SӅ3Eő/baK9Q5' QY?"J9ԆUar3GM(j8Y>˳1[`"Ҁ_*/\3шDmqpNiIOͱ~tfZ3HH7}8鞹o6ƼQB{~ % ,Io<`E-5)=O i/,"~WF2GT>L")EzHuD$Im'VXOC🯕H\Z#r ˣvL33.j*;g ="Df "_KiG-N!@B4݌jߪGvmSziMP?z_[Sqh lZUDM-ސNA6N_<lYTl-W=,Y.e4^aaHB"+ԛDt1#;#- Jp|"{z"?{1ٲ1Si0UcINƈNU/׹ץkM^no\C88_+IݕӼRه3l'X2vMR|x %N῅puS{CDszzk̩xR%7U8cS[ o5{xhjN\lԻ8 ACdOLfG;a95{qW6σ+}mFb+Ny,4DB'%R\{艈Ş9q16d[֒c|2'oeRsoY"R jnCA&u].\7[B kcx[8! ڑLL27v<}K!,7XuPTRB)+t&%B:3}vk(Zf_`.D ݝQm j2*)6GTHYQh}xބu0{ CtZ^@=}fXPZIq0{ܝ(i!RK Q%La2xs:@= \ɣ@0O o'h8ċdHlhyL=ѣNe;)h0L!`fh>![6iAՠI8ʊЭ-5iްGMA qZ.~'m~`!SȈI\ST<'ٮ` +0%ܢՏ| Wwl]%]՗F{Ӏڒ} 4d΄p-7 ( y7')ZKLc\*p|Wy\@CQ漟WT fXj39Z S"{r*doz ps0!goSЊ@M5j73ضp ٓ09aԱJϐB7`>D/+n|f}2/w!P,K ,Y.OTI7XN&ӝgbn>?^Lhih셣P<*dzEIǠl@ƒ.Z/όJL`)E6)]H6}VE9n03- vy O|*c_.- FnUXkz7X*e&7儚p}0) N6K(2B`٪Dɒ] -DDFeo$+^;k</lF(5P*-5ǼM 8{zҍhJkO@!2#^`\"A2ܩub#_Ody!RҧpN^q%=G^D̋.~##'N3>&͢6DWNyxjR H)JDEP.h$wDf& k3r+T.\xwn&#'H ~oCazϒyc׶z;X[t #ћAqF4Bxŭ(.C(/|Y|; [_)zsy\?. 
őY9CL^mM;"p\B: s(yhǾp҈B=T5tiQij |8hSm#;k7橲1g' B#Xp_hDPG( DB~d#-qͨ#7 ʷxͳ{՛`Q4=QjD5qr#[l\ܭ} jzy+m ~HET8#)';6F xA;0)usp_ȠN,7/Nнe'ҳ,Y܍,'x%k-kxitP._]N$˞>s,, qc2uPXb Y|C+Q.x+ֿat!( (O ^PVu})69pW:<>,CP"*0;s{gx.1oW6Y̝Rdr(GCgee UF÷|Bb-f;˧NR+=qq9ڮ=*wKd)Ws ևꭻvĖ#AG޴{%1Eeyb&֋VG9$yhYP3;!v,+ P3EO.9]dL)9hLoFJ(hòc&bpɴpN}>N;ܗ#qx{`⤦B{:%,a. g jBN!ʯzpA[)s,X1v{U^,+EGU? ir$=\0 8 V5TgEr8~ `~k,e.+lg05uhTkᴑN@x-T ih!X>gK FAcmKR+5x7x \^X)vr r w}9Z)YY"ޒŴ uu2&Wmc,QSBX j2ҽHGUsl`J+)hJlw)Kv񣗖>/D[iFbx|s $L~=g.L N`7jüsjbA%m3ȓ1a hҷ3GDKT'JRmg¬ET]" -: 0sZ؏Dpr66jEXQ7Q@`HSnl\G8|m iM\:[dv(*^-CˀT!j- QqLc18\  2FgiQUlSB.K0^.m`} m.rʠDcu8Mw9J(ޝ2eCwa?6@yT? lpÉ\{&;5sRSd0YReas5ί]c|M@"L?ԣ,8aK$yNk=)RoNd%Vϳ:vnk*sWz+ C#>nXi581PPTHZ=VMY6@&>3&c IB[Gh c8 cLXm,I=c[sBND5šYe;P4 ?:BPkb5)59CNf)e]~YW^2*}Ʀ2n˷1}$`74BWI[ q{GEdõKәn?5,5-QivSa'p>Z%^P:+˃Ӯ}wQ,;HE}L+QXJxNd&2p%)ЦOwn+O(!fAVw@@_]vP=.Td QgIbdT N'u;?]>AۗUyiĿs߇lOsh=lCePՒBJĴg4 > hc^!UfHM Eb_ܡ^opdnB&J\!kCn-@rUx&IpCE睅ԁC8:pG~4{{ݟ Ml?Mn0z"p^Zq÷ѮWm]p*PJyW_{s*7Dz mVGe=~ vH;E'nl pKq< n~>DP]$hVMAͪdh j"v9H$PD*l9?6X_RRD2\itO{xH;=L 1l}Mu>9&b'X`k}a+lNiu}5Љ]k.μ(0=$2ƓxwX zXRsaB:L!Pc"Mؖ‹E< ..Ckb1 ?K!m1 c~藪QEl|Njj % Blo}XBga1 " }j.nw=WPh QbO0|P'RGT&wXDG@)y /jhntf!wD\+2kdrz ;Aofb2f!Օ(j-"z03Y,,h =(ƜQmHCs _GNV^c젪2eHcGc` ?8Q0x? 6֏YIHZ)]-X"hJ5gU^%]v_`R_*wGy'1*6I_fʉă؛dA[@Gc_ZK67=<#a&IzpA 1e#~-Tz70NxeiQ-S:NQ!"_1SYђ2:(0ChTΚ_ (V=e]G܁r֠^5 %e#,˂l&v!Vk#XA&>}3~ju'|u"_#zvNü$\| Ǯ~T:romkY22Tɪ'+?(ddTs1sOp@C?7>69dx'MWPZq,Ffs0bI+4 r-`B[kemW\xJZ/P֐vT{Z2wǶyymqBL2']SuxV8֑9IXuv*6AǓ$Z~<eEP\Pgq?.Я;ktB3vdO/#NJbH_iX?4 8ض1(mܯ4PoQw|24_JFڛX* l(5% zþSof6fе@ߥ,sm;L`k(@ JaQU;]2ڠ/&(1V^>zL(00A5`fDR5QFGy-hodۚδX ᵛ#p! S6Ì })Vw;AH5QJ_!Gh0>*B=vx}?>^0LLX&Iqh(7 e&@sI".cUrz!/!GM|41YЙԙ=]uݡǤ,!ƺɒڿkX+5e` $]'Q1}e-&vy;jB !3G>Q1R֟_|fT MƆV3ܵQ(25@+<%kvP"29tǯb ~E PhZ !}i~x/?٫ H~&^dIGl8+#gOմ\ z X ,͘*D2"ܯXsRr"-}048{33c<WtTn2V\lOY ր9ڎàțv8ψBH|D4Z?g_:vjO*iLĽ HR,+Pޠtu#Dqw@@7C}1e{U`N)o)0X$%QOG|(WEhp_FP),6 b?QR3Ͱ&d{ـ'z\i_DJlJ%Y\j޶* XeV] b/BʛƮ#0,3{gڰʿ7"d|qwkcr`д^A(eZVF1 GvOgw9߻zw8:kE^C[f-< L^Ь'+GF(/[!lYr4ѹ߀1zԝ˄WFػU9ȿo@RuyZwH%޳כ)<0RFM/&oYRz('O`,DvA3G#"\ϊ?\oTEiW$Ԙ&t]!26'] z&aiAAxGkT},;6B6s5#NgP+nM_fGquQ Vx[ic''~kOW>nv^L(5O.* P2ӂj*wڱyUmVW~0P=I]vv#LI}k^1.׺ wU?r//I}UK*2s@lLN&Co[07.`Yb۾>_㡧dw"I:^nvL >a03xfp04J+IW*ư{Y!@.uj/fkџK5@嬟f /^%vbMDj qÐ/m 5D_s1|h @ 2M+^)#~(tbb}d ϓ@j8>NTO3*Aptʌud= EVo~ ».(s:"=Feҷ0򁾀hU$".0$r LGK?0tQV kE Ij,c`p-ÅlrNCb4ԊX[%: tB\VHS 2n*d&n2s[+y9Ul*f@RNݿ=" x 7 R?67G1ҹP90۽1C ~7ʧZ=+9^Us & Wv .P,|KuλdTj4ee&!1o",5`@s|P#AꜰEm#۞y.iBAb0"co/KANWAEU8]e98=q>K*fU832犻U[폚YKhyYw6A_eB11-z`xN%^Z1rMɳ{X 0bp,=n,D _:o>Tfna+8 4̸>c3e1 Hr/R @ո􈑻piup̍]T-욷A/ Y>K3 J/YuO2]-8*N1rhP7- ZCuYR󁥆6MJOf\~\܉%rRxn,Ɠ 0c\ft EetDSVqjQf䇫G(43+q;[!>zIʏXa#xZP-u .qԏ5 4O-.&g1 ds=X2\2)N3IZS}&>Z-$,N8DF{8m[W5飿2$"WXjg mt+UԬ:כ/m QϻIL-H|uF?o`ovuF.t-4 "))[3m-vWpzL'LBaA-ou>UEQU*Y+"檡3;`HZg \o3AYpi9Zapi &SR܂ pAq0r]=m{b>*of\ZHw$dXrWwDޑJ2܎Fc΍ꐺ!;|e ,QN&-'ϐHd;])ȏ V7GT/nR]GlNɰ{qcW>#zU]L)r qt(r=ьųڡ9֕+B3:]b%[n,4B ??5!k[VQ䱘TH[ädB(@ TYMV'sKxI\ɕhR`X;g&{<:,@~AgS(3Js) Tj3﵅mh893Ѹbl+S#$Ym{2X[j aOHLCk~lC;<3lK[e Q ?1j|W~+Ri2b>Xj}cJ+\cyi fO aMV:{а&z@:ϭձ' 6hۀ Si0t4! 
/=+E΢'PXvxYa~;1uaí@5Qz^~ JO [U˝B"nAC(Z+jۑzs\S iFaWeO t px!Cl"鱷i;)&JP1W .Lv0OH#նɨZeaq%e@Kp k+7io ĹN ]8-3Ssm*lReG=>2H ɱp` HJRz2J+-EЭTԎ_9p5S:>ػw-awi\]U.I;:&)fBbgf=K̕zt&2eF%RJ 3nfjaT6 G]9K2B![T YK?bi9b,*aw&ZbD= hK63({@{&9u[&>AnU K<ωiX]AIqQ}S/b(@KXj?9dr"i޿޺UQhc&]͔ >5G>S} y<,}G3aؠ.Ϣ$sw7FJ%*hc)s";|HifQ_ͱ'$B7S " ݫHgUlz4V7T혁BwQ=;6qILBRP3h<_5l!/1n+3 GS-ƣPzT]qذ .\Ҽ)<ƧX[j̴戕(;c/ ПQ +,v5|akg3Pg`} s `ЙBNʇrD1ӡ&%{Xsp"dR.3njGrGjjUTz{>Ʊ-ƛsd wnʞOuw8-0m뾸K;2iޗv'kE !Rqq)AC {p'Pܭۄ"AdC4}LkІ wTAE4?8XԣcZ-XUg&ʘ:݂ܮӎܪV'z^?̺:W (X.+n`3܎Qf+"6Q= .ҥYWT< nDw-DWioq: Duagj7ҏ>%au("\g-6\"d˩YO GKD9+FXnB@Q mm$D=|}2nO TaQ¯ӌ66^QǕ5W c# .S"T6%NgIj*1:xܷSjR3@J O#|K`=pCN_Yq6 p!G}Էq~zH?M#4XAS@h<_*qa_va]d$`ЃB  w+>3aD6[@LEpc' hvvn@ P`ԥqw>%@Qv{5iҍCbYюHR[5(PֳIvIp|\R^MN !?iFoj&l=;w/Q7X::+w]~ /֓$*FޑT#*xvY )Ip= {^whS>-..R ~HPjɮ=ATVSm,䠢m.ξ9 &H|koA X/5e,B=qR7KIJ! t(2n#"Mθ2qhWw ϔme#HƕɆf~'Rt]&U{hr{nLֱ lxiklr0{9HkRjNK؆V|༴1nf4Y؜V㖍j4f4Uuq04}7Ƃ,_v1JU,a"lQ nEtb-eޛ?ımvv9l'xBԚ/3Lh[>M8INy/ùcSٟ;zyj ӄi1MsB!9i7zH`}'j%-(B/*S4l~ xhc`N3Qл,~ݬdB3.x]7 Sepֱj!*8%}ӾMZ?A%`.˷^ƨ_S5Fa.~fR . RRw(#OJܓ: y~[Wfp˗&@,+U$|ۮ+V=<-GZT/1C{ HeOF1ۜ,֍@᫴X4?:%Y$ToщF ᙱdƖ=He5QD;|ZkeOiho-LAfOwڣq+BLK NgSdbˑ9?6ói#AK5<%^8\C/wjq@"V-#glר$axU@k8S؎PucbyeG9Q.1/yh Y_'dL;!AnćQӽ+x=0h@B M)+TNȗ MV2CZ#HI]`GϹ39ks)s> O>LL2kFdp/n;S}q!M. P)&'l qS!.C9-1H`Nq%ɪ^kbij h }Dď{]P}K4P }kpGЯ~u #}40ՠW@RJECf*n$bVK~%#7d6Q>ŮNѳ]GހaU>G~%Q{< ٯ-1rB#mw7ޜnq5Sh_Rs@iGq {t/`TtNBeĆ,0M=iѽT$&v̐dDD«ȝ_|]UfzUuVW@ݟDOa'4SI~eH@M:G/N;u2a%j'L@""val׽@݅O.ɍJ seqWz5Dg@ ndySC ` ;Ϡ`YXqBv[.{< J5\@OtE5 YiY+ĹF8 9\֏O&YMGEF""(sʿr,5|OF5VXc>!(^E23 fAQ+]LόgkTSeWL!%fS^--1U~bpsK@ / rEXrxp@E¸³oK;,,n3K$ H7o Z9)l㧷Fe㮟#h(7% Nbg6n61bWOwq}hqq(PQT)wFQ"9j@Wx0HJɒ8ɎTU+6*fd\b6jvDMZLpt[_5A9*e%+52wB;{UK*7C0+kZ)IX腵/r 2yP;Tgv<좆{8ztGI;:k6؄1цBIbH0s?* ۤCZ.֢QlWfT}N79e}4tl96S"{5.Iq(# PؘԤ7[ܪvD/ ŧ}/fa@XP 7-_?Fl|%T⒬DukW-c1刯:M<7lTxYw%KDs[z0( Kl/ЂtefV{n\GJ3xpve;*{Z>wisܾDIr3=u`L1|^/0MNIn'u/yR^dI\ϫW1ǸvX%.+uЁ{Rͻ2E"z>  !ʮ;E<" J6jB EyQT{Q6`vڮh}oߧWU}b(CKN%rVřEBDعȏQjYC#z7Y`+QֹX ycgOHfI"^VBla.)e.ҙd·e)nG֋weBp|xiǬD!}]+ v65> 6pZ MD8X=)z#^P1yB!˅FFrvL/l^H,nWjT1wڥq.2 " Vkl[i*mxh~H ebC%`U=(> +Js@mVWq^ L1v%k>.x=Xb_JQ?KsTnX".Q@֘ ]l.]+D7#εc\_D lE"l u P`${2-瀄Lpk?  
fL|3ą'q57W*;/ECƩIr-fߖ[JZ1K`p6m5 +8sMWP!qN|-lMCaG3\q)g Rd Sch; t5QG-ˆy U{CUa8zכaNROy0Tg sUDV5ыnyhHQ [DXg &噊Kϋ틮Go%C}oZ7;oKK>P_5~fđ*=J̽HK/I H !0>g\;"Vy Bc`lJ(qEU`ޔsƶ }˧9jL2U^7<ӳ8;;x)%u-n`؜P%+N/kȆ1'Ix~.4]B8)3Vg1'dM`Ђ|I zܿ7ϐqLW7\[&7?ʔ 5*jD8HqT!84RŽGs^ru+m ȮuCM4?uI|;Dnpjf f4sYW[QqM7w" yq+~N4'"6~5OB+K'(窺j `<%L6\h~sڝ2fb*{}n_!ɒ`JSjQG"s綁B[3*z sLf~?BɄ;NgznU/䙏N{؍nL﫩q?Y00Du xsVg-#s MP1 ( [րm.>jњNeEHcL o#KHCIQ]C #p_Ga/DEr.s8IDD-xJ4X{?~'{4H1ߑ2 ;mD|ܭ'dd}d9I}\cuR$ۇAMuv xc:T?je?UUY2ěk2LNO?xt  _ߗ6|+ܹr(u[$ދV@V^[}w'L[cFϡOۆo(Idep4JŞw {XBg12M>|8opE0,>S:=pœjPdbRϑ}=cz5\<rkB6vAeFca>QT w-p GO91o U]Iݴ&y =DşiԤB|7 ?PpggLz,-W_j}ƒ_}_3>j(gaw #!>zYz13Q#ZYO"v<d*"*n@j9W7VMH3㉞8OOIW(ECTv[O7qtfӦ9'b+W&*cf~AqE/NO5"x$046j4WXt;/PIujFjenO( ItBޠӑiCn{K=:DɰM|+UV|+:j n`l֍;G9^[!l{9ZLBfp;$܁) rA'r"W^d j=SwS"fX=5c5qeSb,X|>z;rM >ߍ3VݾC~C>+*kSГ'wlC zڌ9?4cW1e/!4 Oap"1 z/[W)E : R^X~P^;ag޷.äv:ֻ#}ԢPҞ@~`1xء^M:HZZnť#'<6+ [-XGs/9Y?Ҍ$kQy_2Ŀ//4TvAYw;f>1nE4mOW?HS\.89@&zz ņll1gx^Hy917q N9C2$C*8N$6]lZ%<~Rݷr%\ V!~~vo47 -aRb]{#lMu#Γ2VN+'io&ѽMHl7w8 3GHwY& V'{,:hEuϸҘsϕPIGu}f "{6d]PQ2T5Zlq-f3j$bJ- {pC4rsu-x+f4QIpcdlC#FܬLco}ϬS[A_uE`EplϬzÆi߳ɧL~Hњ2l,}Y<"\CapU˥w8Lakg LPąۆԽ1HVŎqҗ,ܝB=0-ѲS?mӷd|YkD_''<#06R^FBO 4~?tMss^K9$3't_ *u gmkXz[80mQJ]`P 旗O9$mD %a#E~LWpd]T  X0I=gx+bWsoh}pk6 M7Pao=LM0mcՈFBX(e:؋DS_:$ _zօjJT3Q,uW|@Gyf'у:55!rњ  wQ9FgKܴ9H"C m,ՕCoYGO-n+Ovd깿dlȊth/+4( kTbpxQQȭ JK|p}1 X8E+k靳Բ]ȷZ+4xx?:yBj2JI TڡQwu!lBlf:I\78ť-5V%F= :K m[͆RϻBQUy_ °asF0GtcpfaMM#lBBӢUf.i/jVa|R[ CaDqjF:gVLtx0ǐB[,&Lbo\{l %1 }p[rd5K7k{Ɓ6F8?s-R5)^ Ub䁹"P<Տ4s.?a(If: -fcFWr(gɤ[0_a)Ӽ=_YCpe~kO$o|ݧ̋f- FAL 77Pcƾ:"# 7U^\*чw ;',0N^Mք"3%t?` Жb}{v #د'xmt]?oc[`β+r/ٓ(%lAn̛ɾi{&>Cl%n}ڴc0?K`lxx5XΨDڦ*y'gQ)opOI~q!ʘ#ؕըG|uQxGbt-0sŁj3 *1x NkbQPb¼T;)Sȳ9"?>m.e~|y;9֗@7yǣy/ڇdɯ,!!V[Z;M2IC#R Us3$|::wVP/wMBN2/yq7 HsGLb7ؔ,P8K8I,' t5Ly"rIdR//SĢiHl*x:P|s,'ws2#jL7 5a30V{JPaM5X*M3SL.WZ!´*jO̚UYJ:kd\en%ƈm>?(gp\i,9B̯Jx%M$aV&,5P'y>{[fyErz-Fm/YyJw121$ak?e _MwQYqiVP*8LsOs-Щ0o?XSP%"M-oknZY< o7.<&V+!Fv劺G;Kx)!#i,~$n;/גs0"nER궛rpQa7A~zr$]m$?7O#A:@r-5xYy,_&Q#% neM a{[9o \qQؿjC%sd0„ƥEg^\(EK]"yd-E:ftЈZ$,SM^i]"gdd\<`8 gr^P@XUSY_ã=2'oo<(sl ͸LR9 {)c[X!$'ӄ, BF%F& $QNjXԘXαMao]/fd-k W'F~֣qd!p~2 NItf 3ԔrM:mi E_n-Q\ +T<h9=:nz¼7ĝ{RڰoQ}|Q;yBIԽQ2G{u7 ]K$jJ=E}ق`<z;?OU ]/i3d[ r'KyĎ";; YO:c:cBh#U{.rä**$t/uL"Qypx0.\G+@U.'0Yv"a Wq{$Aɠa:,:$ܭC7+ n'eoK9.Eعi'\e6+LS-r=d +NGuw&;i$pV]+9eR?g5d6f.+w*k޸NMlqbł{JfOK wPa~8tOt1E,sJ|n=uK;;ϝ``7 gBJsN(%}ch3v+ M&/Ƶ 5EgхNGlWLN-7t Tk97O$AYtX` t9H{B£tRЕN oyy "u%zfP X-QT~ /"f~jq%Yvgu;_o*?P&#iokttw3m>Ϧ[DD1j6]Bec%jJK<"dWcP&t'</p _^^.dHֺB𼅂0N#7?QLVOnx'.@/ɮZ@ %i"q^#sc"VtM#éUMD:fL17Wľue\$ߐ rqo.~ҾQhWy4͏23ñ/𴶿)#,wDs~%&%5Iv+MrQBI$AgW'{F$?W@/T&^o p-:sn^2ݳD=s ^:+rc+j2ghG*It`p٬czFެӵ΃~ٝ<7(˹^^_2rLcn=W P~–H/NYF3[š>V@pӷ0=;2Rrl|nAZJ rWGV?#A2ѿ+B [5;'|V/n26,?3fd[/ZuҬnŖEfW_=j6eGݩV7;d lT6U``> Hb#fGВhx&fټZOɧk'>P6]a]8E4:76 kfQ(恜Jt9(rxSӃ|e݆,(pJ_ਾZ%gƟ+[y}jX9ѻs#C"2{]7l)邰ͭJMZUL!{?zb>Ǡy ' Hy@5@$Žx5Z'Tvz(PրA8V3 У4@yiyy~5 9J[Ua _Uxyi\[7V Ξ9]TLR$W|^XvtvIW)*wN_kr)]!gyVLݮ" n$#Pab\} BH`;W׿e/3ⓒ/rwɧT☉ʟSǞflos}gq0;'kslFAmh4ISVjG}8V|{{kDu:~(UaFn+qLf^e7#u/Ž_8gL};?dN{J-n?:Ca9u0y; IL]w5 acrvo&?p >;@gHZ/)+J aW\6K0 Zlg Z s[GϴѤڬcc?p3pj&˩9:S/(H(W#?cZ lYRCSȮa3(:-&N%FC.[Q՞.f {AI[gx'FNE\DzgNs쁻4y9M ;i҅?jL8,5ۨ&aDMM:"*7sK`WAmڇ>8gMKy_ץ r2mXĉINl<_VKb/=ԵНu Rru(ǕS4dmV븸t ӭ/*ߧ}Yr8j 2:ۏuj9*U3֢kBP˺…8r$IŋE*'N( hyGEJNobY`|8<9.oT'!R%]ڹb#jHZlB}skeZL (^*[ׇ] !l +̈1z.X"˾+|e@(] 0h>O7h8v+ $NE\2S4H"h|4YĵXI QX/qtìڽS8F#gWɕ*Ju_lZNJE"#z0_+H[s(RVUGRu4ʧ5.tKr50%ȔC ^Pqy{KXU=![R d_{[Xq_GٺfI؂ t#0ul(7y VwBk}VfE@ |I TTEòI(]Ύ?Ul~ SsᚍfD !OlE`t K @~@.>qa5'  :MEbgE&@.\w 'r]F|rGh江*u_g17K-Fl68 Dc{>SsErm\4.Jbb^[{-T[WφYW0Zvv1ɱ\cit ; 6 ZddptNtѮ:RFNX654I wY54XI@Vl$tz(_ܬi9I#j_yqT^mhYQ3{z=9$h &\U;EO-dw wv6uIT a*vZRak77 
-\c ݩhv%w\_0קoPZk '%kCߺL Wt1$ |_tZLH:'Y>(J5||$+^~TVՇ_rumM&PqsuL;.SX中)-)~#~{{- 3˨Qg/yEUlBa &0i` ,qD8WVH:)Ig5yPr?,Qއ¼I7X B'ZRLRy f9XF0SZi$ՓsD 5XIBwɰT$rEGu@zsV;u2tшhW*; Iyno.a:?D.77 z5?堃B@ɾraK25SB>bDN;%UGȭ3Bfumpqؾ2K b:.H̸T lyhҦ^nY f\Jt!9<(-nAS{NIVPGAaM}B3/ ~GhLJW~ןydx9(} Q9~.t弪M͆1pA@7]/P8phh_XAjv-VM?<+/؜~N12м6c78 đؒleKE}@en#R!+`̶J,xE~-^cSn :Z't$p]+ Ocz<z/mtSi1 .G8=}TUa+ ΑHA. ΁$8-܆SqJ /9M#C?QS=Nެ꾡_1o\ց<N9Ͻm!!ұ nҫgx^f|-d V;l7;r4nVk6H|=~=`>rHQ,Z lvɳ"Lk65:b!mH\k]Bj';ϵFaAHTwȂpgW$l0"ؓzgAǩ$ oJx;ce\jrq c|!I˭nNtɓ|RgUVWB Q_r#ayMxxC?59PQΎsXeG[,H3F7x⽵ wtEKy*JcI,Ɇr"Ȉ(1,JsZh:rE&ujS8ZUJc-Ü$dY :hоSM?:d ,; }[@iɔF ?Q\CJXM\Awd[3{_6 q ;J7^y V3D68.?boT R+A߬+\SPkgɁ,l@?ş.3&GwЬgQQ\˟/*)ᷘ0`Iv=H'U0)}2iKՈ$DD4RT^f)sd;H\)0_~U:wr$\Zœgh0B7R(j)ܼifp;_eA)>ַ_лuLw5 ˀA(5Q_32.?x:IH%$T_ jR8~2N$/CI e4%6+lVG4) ԫi5Okw4'2Y4?TG?=p npCqf+ym[#{ Qcߟs^?IB% C R^^n̑U`@>)Ob]fϥzՌkI,U-SxAU<[{^wp'4'g|PR!dw>yY ) 91+?1ߕ&s TU(ge)#}IJ?.q"r] F@kjEpЃGY=XI1uUҦ3?Uː܂&z=(zYJPU)= !Yr1>'@rUÞYU }-,!F[@+ ;U9e;v `tha7kH:¥Ļ5QTEa^wD':dV0S$b^Q޷ˬE/Y@b>;4@L{g\QsQH ;4TK7P>zw>7_.'~TB"n%fZHfq9uX=(V7єݧ珙${\Kw8a˭!?4}0}0clނ~C3#z^_At= ٜ308X+YthaMY7zOOD`^Qn ?b&A]*n̓`D,j wLVp>!5;в'ax&%MDͩAz# y": 5@҅0WbC*^Bg r%Dߘڄ Gáu(: +l^w/{^D覎f3Ty,df ^)ηVUW~ÄCѐƁ>WH'Mwwy;) Hɕ7H+S+gc~Bޛ8DON)[f |1*r/^ԾlrcLS}`RbUMQ`*`{Z z|ޜ b/k@e )K3Z8Șk&B9b)!]r%f\okl q ^a<0yX`@p[lU=vL,T25o.ĩ%} /*De{#B]hˉ]ҁXw}Rz!<|,m]RTR |pnez뀎 >aFδFV7K(*OQβ{Cn|i\ pόdVw#ϰ] t-iF9ϡ2G{ͦ{ 7}5 WZ+jv34&"ŪA8gV!h6nUП xm/i_l|Wkރwi40ZX$F^.SU+y$;;g@&dLN޺\2珿rݦ<23&I yYe܄ ˛,Pyɕ685Z_շ>>QriL2Y>^a@agw:ǔ { 1^(w. i㻻̺dk=*W*ELzKbfmgχLq4VӍvk_sHҤ6-T?\MyL BG\yمgCVfYIK _,Lgdh_c>Tzm"$^BaJbJ~*i NEZkچM;a^l0'0ltP }[n \r88L~M:NAuÍ[|S%M/zAke1 =pqM-eʥP^ s"O+mZ^ooȆMa} $xi=J -k3j Jcp֋t$]j){U|ϊp|l\T8ҔR "1/,VD;L8x1D$Ǿ'IļhaN*C2*`THh:CwAj,l),,7"s~aфA){Ձ KJlV EXw|h voU9tƗ{Cr9nY :;x |vBa?LK`3VuU2hP`Q]'Pt <`.?;IEW@u7BIO7[IL\{2/ua16XFMY0@㇦ydİ䩵Q_w>} _k/K"=:K)uh[=mY⺣hsKTX ـTK,P?^ 3_Ak/c>}\`ђ8F2s/C- mŻ GG^\_ʣ‚BoOfVBy2G0h/iW, *Ot-|)kaO tYYڽDZ%v#R}2ʗ*C愐&%[K|5 K>ϔzFUJ^#?3h^%\GCCYfDW5k` kXSu仰'=&yiQh7WVmiإzvYn_qfj'Z:K 0A+}Esm郕3u/ 7v.-&Bb"C@%AiI+ʶ]61; z! ]7(v; o{)(>"L %zguzS_W$[IۑYU}AHgSU`JCize5yR=»y.h kl{c}*.=L y^@Dsu"Q2%?jzq\v.X![C(! 
)Kڮ3Ln Rp8 W<ؓ[\1UןZm9WHw/5mSOGO'ǒcz-eD;v+u6a1V i3V{.Z‹^fDm^?+!틍D,ƳoU8s!pQ^,GE0x9[N9EMgˊ&ce&4eG9Ҭ*:N~LrMNl"tǸ]+QM,:nklBO## 29i Aia3Y AeF%XRd!"R_6Ɨ2PmH;w>pGP.&yqVBu NSHY3?l~% +mp̂TCBuCnUe*`:6> E8ro_I{ui5G׾9861ouQ{#`_^MJJ:kVqU/Β!S7Q#ZJ"d!,LMyIhƾ2ۖnLm)CwA/SuFںuBœ߭w?Dw!j+Y@|۽ő~$5m\ф@xo\w"wNϘ~R76{Z٤c!о *:,O2>=2),;&YMF(4mډyaxizL$mmsWczB@6E\+ p!7=~@,۟by p`l#) m | Zڞט&堪L^˂㥄lu|^Ԫ(yPP0ZWM#ܡfZ\!8ju9?qWsθ O _<"8a=e=fv)'^^3_-I V09cC8vr?%-=F%gFkDGJOlaEJ^Mt3]./4@@`r XFKBQzE-|̦ vJbdEX֪ܺ at\>ZoZ>Msd2| ܿ+mr&~&/'/D\('ѼN&;&1T,P!D0!mu&s֘c\4)*OB5N qhN4(~ӚM5 %QQ), {Sy(VkRqxE0gw^ vm^h` _8NeŷdDrTEATDsEwqR,L>H AQLjc<>&u/y\.ٸ,]e ]Nޯ꾉zỄa͉AsԉBЇ ,T9|%6kg&a+_'a*H| ־o6[tzi;o(PR$ yWy )w`Jk|N 05׶;DuɌ<_# DcD;&{';=[L<ᆋ;QJI%Сek>4M,,}0eBaNsByEQc&7n(ض¢AUrbBߞʞ>Q;gтCay'֝ kJZ?Q)P RcjA]Lz^} P p68<;(?6Xwҭ-[ĥʨsڛ0F$4ޖGs;A@k"לDed[P9diw-9ՠ3X_oK~ZO҂Peq/˧.{ɓ ݥ+Jy;}Y.}Ɠ:&cm3f7ڹʠ+/$D)Sa*O"Ma4lM V6nY+${۠hN^h* M1UQ^"N-/7R ҨU#5iz\Q;ߤSMK9Oi6dxƞDA-lٲ jװ^6[k˸,P UxQ=s(Q!!S'5Q­P4wxf3rF.NEGT0DW\ {S{:=A3W9VV OҙԿFZ>\ 9)RĎjhkc A=/S?Iv3O=E@jD vdIspq~߀@4~@41dNMQQ{@7WKgP:I 0d8}BY* t)Dn @> ,ivKGFbh0G l (!uo\Wb Kp&.]FE [Jխ=s6 KrHSDBjԅuNʉ BF Ɏvu"YfQ o p3#<%+sR"F[x_DR.%'M,w'wNEW6B."J}E+)'= >/'~o*@Ne,+R{=!GEh|w-gط>x 0Uo\ =+GM.goS$7Lפrb>t!n_a{'ZO#T{0j8w2[D=9oId* mAdmo<k*uqPPsbeO=+f+t̽poAi :'RVeKa~ЫGؗ2Vb*72>ШO<U\e1}UqEpEr+> K6t!/j} sRcVxY^lUMh:%\~%EK4_~\&?0aG(|3ws]>5݅]%!8`K@zR@nQ$I!5sK:1gU; L/@pn2?-Emtx #,)#>[[dJ.e Zx+,V!KX`y, QFMՠ>3]l 㤯$LiI('6sִ_Nס#4#Q(6^ O$~%Tyayl1JG]Υy6EVŲ>fpB?\z5seIc[6?J6 ɶ6}\Z$-҉&pkk*j5v6F>q$$:G9zr":h;<՟⺽IJns=ڡ|7\sG( ^Ӂ bZtWF.?ڛj@0RWVi qSVF&(  ݨS~0PD^xwPW{pdJp5Zyc񦄥V40^ ] 3@iGp:)8yw_׹W.ҙ&Z=Tn%}TF+9^*CH1 9)^)/ᆵwDc6l uђ!f7 /6Xh"?oM˔SŤEMAi.sʥ6X4qd &ă&U'n,rpwi!ۣ9Mm, n=OAӾZ8;Tϟ*yЎ_=h<^AT CCo\}",N(@T=z7%̱*V<56@ݦ+_le1D‰G)c<4s$[KCz$bsV^ۀQ*!{=D ;݇xFO Us-6q맷mUV5 X)X^8; = ޽ցwG:.iq_X,- _R:)X>L Ԡ A~0kah9SZHc87j0Ǯ%Cd h(\[&8iw^`wXx7GPEkė{ֺ74CXq-$ ԯ$%[{qGC5ƁG38H5̮ڨv$49lG؇_w˓z4`@X8\Yv1rJ0Ĩ+w!.uHyζ )KPVxR?a,̵p>5r4[7fW/X2ݨ3G~*{OMpu/EA."x㻦ޘ鸫P72l-[MKVf0/;N`y !M_)d03!mͧ\iE{zgNv< ?V.L:+6<8 Lڕ-JnwiCjA۹o_׍ gNޱѣz ~_GޠْmE^rX*>į(!^^[YSfΖsSWUY_2yYe@"%Ӄ򹌧n_W( |dYDooVGR!ޘ ݅)ZY#DR8rSx6”޾ wI2rưev{Md=w6if Y4~[=6Z™"!pDx{fN,Iل=K:MaJ e'{7G3J%$A<dW#u!`dq\l?Am/ Tyh-W h:1f^4> Eᢀ5{8;wa2<{V/lL`yo~%O*>QT3LBX =E6YZa#?cbXnu}dͼ FJ}Eq*^ DQ?}W3(UFaݜ}uЏ|i3@-8DbŒ=SI"I/kePiXApNQ^AsUѤE6buaYx{b\^+; 꾹 VcE$XȊ9ØPPLi=0O1S+6DNnwq3NE6^ܪ{I>8[ [r e_!GkBL:M:(/St{8^bK@P@bf&K=Փs@С3E9Zr_XIp |!kbU Đ L6K6J2#J p]7Hx)v&."V EmcK˞[~MB\ÄNVoT[.lZD*xz/LAٕ r͘{=}Tà^˚L"AN0[#F:.F7Δ!*(oWbɯ~S]~;o6%ݯw0*,oډvj)SJI]L.Қ#Hhʿu \?Cq%+(ti 2ب(G^gD08׸YMn/ e&~RFu,4D]>G_ J2Ŋ ^Cĝ&9Ȓm-+j=W W]J[pQ?Bp0v%A9u k*Vֻ2X|'L^`M1Ec*m|!k xWl۝%! Ҿ-wei9Ny]TY`s/C,Nq0vk܏!O;\\wOIC 8Gr.kgv83m2ǴG2U3 i'U-l3n9GXYo jNF+pq ǔ7B sx"|!Iuia v9EN5k\{Ъ|#GA}ւ'',l%FD=%Q(sߩZҴ2|M{F*I4 m{]\̢0[Wb]z"71$Z6iq.I W ]:;AYb|" ^`eǜn~ sÉmړ`5hx>]ؖ+sE::@ 6A?Œ p0{6=ADV_%Wa GTI3"X39ʩrY8|UA\dTW 앯HS+S6Uv<)(1Lr[խMfh=# JĮe N!m$߲y0#%L*HP5N)hM0۹%,DUG}r} L<\AWBlNO(RK67˱X1:!abjWbjJ4ezJ9/]"(S׬{̣=Z7Bw 2shQ8nCgԲ7S5BS,C1& F\lD,OnrWF0QjW7 իϵr$ )"ĥ)5g9#gV@nSEP W!Ft%{/SuU_-Sk<2OXڡ(7ֱ9扏*ZN'@$eT,c`&NaWO^|R~%. g4೪b_AW,_0X/OFqQҽU돨WYePKF@=-i`G _G[ N6#BY4qBjl+m5@3gM.in .p؃X6{IX8ŻO3>, 9lHEJz'MupьkALĘ"zm)NqRVDF$/bb\XnDi'sXb!#xivpG-* oWC";2ەȂ5[h4`QvQrJ~y|0\̳]KW[W[qI%ul$,*G[Y"jdIN]ܝ&;SQGjEIYjE ٦-JI.8" jNcw^>rC5'im{YGbV:Q%2Lc|J,e rx2nDM64ldhf\@֑Y`j+X{p=2_eŶG5Mǂ G{ >Dke: .ϗ]+Pm8 ıOxCA<#8֏Pس_TcH>6? 
ndyTF˾.Sd0g؀["nKl-a5D6)> {-(=3Ӧu.G!05aDK9^nklj ; 3Z7YM&n>Br.L.# A씬 p1Fo}D@+ Xŗř㯺(XuvQqH$avG./mҼP SniX>LH76nAPb٥tВ-=yCWnLT4}Gc 3MtxOZnrU9v%Hc7db+DolL^%PX O\7^Q/Ʊj{%yL{ 6]]2黰0qJf.%1Gyc6;30V (F4k5n+}\PX٘`_ 9-bώ>rjuH1drRS[%J?%F{ ʍ#e~&Զ79n{s!ЄAǼўB>%Yq 0r1}vԏ- xޜ/5Q7K,wXh)&QTi~ \ՂJuQ"} j]e)OMpDAO!} Gyw"߼cF +וp8+RmNJ[RcD*,Rf]0~za>n8NBj:4"2nC&o} INje`21QzΣ ܹPY\ =5FoՓCjI蚭Y_`p! u&wavv[}A\7ѧ4"XRpQxYrfc2쫷>\܃J"!!ÓXNZYL| T|q٠*dA ɔ!yu#Ly>eKh`Fbb yEiFEf-h>H]I4s>._5r\X{ ٥;%'h~W1psh-[F^jiٰѻp %INs y%-2|,mc>/xdj!(QO7~R`.Q;&B$fX/k@ϙOS7?mqdחU3 4cY m6K Hg\榡,2gRv#: ` p#)@5&Wbр3^XƐQT`![B7OAN|0Ee8objӏ/J[ 9rnO~N(fYi7} ifM"HIB³BaNr2َ"7TfNSV:/?ʚޭKLwm\Sc?3} G].c "D Z9qa,a;.ё%RNh")0)VFdm̬)щ@`դʎ}4N\kXT\8CCmڴMTɃA0ĤuʺF}lƨ8ys1ߗ[ x_q ;^‡'@N ^H=0kpVj?YJ>ٔhQeiQ A0PDDX_iRWE!$@;g[Vq>xA&a;Ƴ? HcCi^i҂dS&^9bYkJQ@ܧ,3Y:s?;ͯx&R!,Hc[p8}cͿtk7ך>əljexK?Ass&Ѭgm͢fM =M 9$f}[Uw_FԻVVj;BC X|GSRvpRu,-KXQ ~%`76zQ4A#V&`fpJyBjGk&hs2BRd봽0Zw?heھGח9*}kh.^l`=4-4Җ$nr9Wޮ'SC-(cV!?h4k8sTX3U-4,73-Ր7*%!n.s9b/MDݟ\ _@Rhƒ &^y;x˺&V \sLM6FaRs1룝4ؗ G+ 1It=2V!8nսU3zVs P%pL3a-DŽsA=9L䵹b(aؖzA9uy-9}b2׷Lr Ed8S+x OwKET⽰%]5 t+M9uoӺx/5VuiRnX^.FR϶ ?qc$^q+Ϳ)AxQ6RM!m}8)Jlnpk<3VS~5c)8}Vq{w*OEuwps, ]H< ka %{t |lٜt0j^A`VxPӝ[)mC3L}ղk >J༝cu{jYRoT+kZ})"byC5=.vlKqNqaieq t?hhAd^.! ^L!_whש4xJϘё7J q.g6شrc.ζXB$%az^ē3o3ʪU͆ZCnp[-Q;TòYL47!Y_2FƢ{EsHű{gMTE+t' ,c,3.NalgJ6vqĄ1n϶Bej+K&@ȍB.A y'`KL{_>.6LX}jNi >ɣ_Jw0PLu8 a<3 9zF 3MI~p^ar;B/x*j:m {p/ ݯ_&E .ӯJ1`ǩ|O8ng"1[}5n,`BATzd,}x1zYU8f&Q#׳),D򘃲`]"0xI$Y6}](ph_=.k-= Mr!+IE\S6){PXp;`+=|!׵K|j+lnW@++g?Vt`LN7OXrN3; 'wL5JLHݢb]] 2J r>]L?y#&+em%0AR#݉6^wwB(M)9. cJ)CZ{Ҋ iVpz+rɊTcdӤf!#42g)_ hG7 T[KtVϓ'ޘ_ n[K/l%Wa>ePF۲Aiޣ'6k"f8!*_?x&CͶ3׹.6챾^P,ܚ@ltEdn,+aOHzX ]>~9:[ @?U[IёL 3"{d[ ~&Q񒭶`҄Ʃ å,C{lh $M"ipp_bDJ xA# !R*VMj)$=9gS5U!VLOp{ C&>XG|!N|Εc:^Ю $gq|>҈+C笩H?lH ^Dl`74X}$o Vu}$ė$`TĻrsN?>֜pPc\O9߉nA t F y%/.^r<5TerG.D m:UgW АW&?4ł h+Rs`Qzx+2SmNqU:OɈ OX!8Uw-i*vt,a c{ۉASr%En@|Gt>*I1\Gglwj9i|9拼NA `B@T+s :]p>Ҥ ٗՏ:Oi_DWO*{uEЇqbZ'守Cx ' F/1O@lOyD@7;o˼|a{yU0IVL0,nIu*n9PBf4 yslЀ`$-*hFu+%"lbb `?P2TƭWW5Ч~E2G1A1F b ;k߷pmwgm_#c3{]d Kl T(]!cw[y E((S(ˋo ^U6*jQ(JjGכp~K^ R=n>ēYЄwOZ7ĕzrLH2D| /R*Ѳ* {FڪAu]N:LDl֋%,*9̷kpֹ[>>LDžbO/q>8l|c[ U]fF㠊zZG>ݔiX7d\ gQ׆#Jc< ?+J"WΙhH Vg^Ӎw&]J 1 N1F dˏu>Lv V`={FH#@y*7n k9(A*;񗗀%*Ek=O:N9m rI:瘸&`OZ/lb! v6ձ%X+ld\#) ըg o~M,ۮf6NPz̶ήQ]߈@ Y0ﵪ;ZUMJcYJ$gRKMLPpwAly?# 9q&5Ec7g4/TZ*L44/V[b(!=i$@EpP[-#e&q Busq x. \ I:nY OeiGvN--{{a+R}ؕI2~~:iyr҄ȚuKcF(vfbNKX$[(S$xP@wqcN#Ր"ti3VF0D{-I(T 3XiܒiTLp6zB0wNkAon;. \!1Bf|#tUщI=R3o\s}CJ\poՐ93)9uqPl㟪~Qw֋0ȡ6^1!m l:[\ D~ :w\X/+ԥiHm\4: c:cTMC#ܩb{p$ym~ [ie>C竮.eHq=JzJg&r=)j%GH8ŽXG+H0POp{Է~1QQ~&nq:"36VrIԹ*M++gΉUs_CȦtbXiv>ͶPf%)n9ḰgNH @1wKwCz>'+RcџUuvLuʒU\Urvv8 ܔ1ʬ3;n?up7(.N}J |㸛fҠV3LC_ ۽&mW,z:/NXspE|+u;n /!RD8?=[,9F^(ySwm>LU}&i2:tcScw[|zP`!u\c~("`-*넛4@I&M{ـrݤ"wj=*Y?0F;DtB̛"iKN)ܴL-R|Yb8FKEY%fXwC?2CCgo,Ε^2I>C,m1QjsTc%r564|xjC0vr!Fs k\g`D:)ҷtFEz-&m&GΥk.WKO$IJ>NNsgb)ve>Q͛QqӅAZe,uybq;$*Mf*9Ϲ"aZc_#]*tj α cߥ5*t"T.m}J$ i\bUJ.SRd;?b6@?~$?[lL{+WI{ma3U(JՖ #ۛO<~guHﵕ9qm_p}Ϸ:c{Ōz_˚Rb@Uqy$G%- rh4(܏*opuz Yr˘xqV䣐ƣz 0 g;ӝZBRx3 t$NV^pR2a}?zh`~qOz,}mrVb ɺTd1E-8KwputQ Oλx@`">`L/<-t#\f)g{{\#Ʒ`_nn*x\8Fmec?z|:c*_^sWe~[w"<92Yt4o':sa ÝfCMk|7Si"}[|h)W &}-Dpm0xC/L*.*cɦ1">]0h ? )Yo|h]E4p"wF28m>Tu7|[Nɔs~}wDjj^ 7a#Q|;{)Zd {B5<Z֖DO)._jAe%Qjߴ Lس9uZp6/ 4*#5OaٽlVi~=Iݓ}jG={:8=A! ^©S,jV@8n6*ZQA[w?TH%~YyᾸd{6-ƷZ/U$Vn\>Nޢe  z}<9z@xjgXq9M.ӌgvu79'> "EB(;aZ`lb/Ǒ6XM@N9(ىIv($a@oD ap3:`JЄ),{#A.)(-9 1 po] k:+v.UJ63ԭGf CzQ|oRB<"yc͂y}/ΕYvon K0YTEuu. 
ĽsLX\K8nln#JڶVc5IJL2qOW\F Qx"1bCWaRv3lθQv &PmivAq vfq6 =~@}QHW,;|ۍ-LOI;ɺVxtR+p?gn(2\`NFj<ʥѴ^spFDmA6`q${%Ԫr9KDύ9 &ȀVe8<؎>C?]_vZ#p8[V<Ϣi7o#YKys >1~tkV!aNRx?[ x`'BSGG+۰7[S(>T19]ٕ)t6b~uXV@T#S4_m,F&ɾZ܊&!TnLj ƟWCt|g$r_xϵH}ˍ17õZfY߂o>fNnh_"ap~2:"gagym?L;UH$pBտ'_~HF>IxYJU“%`*wuѹ CVd"e(#OaW;c##Ck:z YgW¶s w(X`_K9**@\U)PAǙ-nc4|FKc.#3Mab >ʇ&˙L}2Z=߮rVgJ^X=QuUBu9\@\fFD5+DswÜ&GFj'ӅGDq E~mffuⰫTf%={=g]](jZ6IQtᬂFcE: 6 (ұf:}(pml[[ 5:O-Y-Y[sC@cQe+UV WjjA',Q% _H6/,3;PlA`*tWTfA b0E/ QWli-x]n|ZٮnKZ%%U14p,f}9_a#nj@߮ τVo thZ/jf1ֿ`ҩGC%p7eTSv:yȇ҂41Wh37=jX R < ជ{( Aem(2>2NVӁ B䁳Y$g S~1|:e_Rqⷀfh|CT,va{0H.Eܝ03Rd "_y0Ukf{fXJln0C fV>)CHNY5!4Ϭ}S9',+6߇I|#<*9'ps>կwWci}1VN- \J]dEO$Ú.Er2,a$tiniJxO6ɾH~M*@}5WNzglXK}Qa7A꜅fd$NgA7O=mOʾ5$j pHG# &w/ވKy|/mq/cHm{Ɖ^RQ9p(_զն:V ,n]?5/X_ͅ狏2g>Gm7fUEwnĽ} "X:?i7c!WmEծ]yJ^{"Tsom\-֨oN选$ ݳ̏M(!V:,/ .ҟs|o04Μ9 |]Z#œ͇%ib#|TN KG|8#V'}?O`TĜ~4L-Jv/֢er(MD)%VrV4,݉j]fбG&b);''fc+?h04\e2!ApvƬn3 N,$CZK2ѪāLcDK# 7y^nBho) +$ pox-uA/^->p "b(]-{^wKɃn[o3${:"$d*?0Pݝp L /;aߍ?^L7-iTn?rB5fan=xo|F=N]%@+vC0+{C8S0m cZ ^/ٴ (ݥp]g,mxhPe[DP@#PsTC3ʶךl_ӹgZ+f֍CZ4akn40T4茿F/oCA9j;t~'G%Yͯ '7 vR"494z;wR5Dϩyv9U-H&6&=ɘB~NEKh  kd@Lw>.Pomz |Г’9UMNݿxӈҶGgy/@DYcRrDN!I"{;bPcuJ Ys_דLs`Aӕn*!f(65k_b|_"u*c-W 4cߩ}tRn>m}r_+iߢ4]@{ V*/Ecd"JY b]~t'.3/0.qJ8XRſwj"smόcN97-Au%E&?Oϸԃ[ŽW ]"ɒ{8e':I'*P%uUCDX%HQgj )LAh*v5j\P UmӌdeyOuJK@(#*8@>8]%KvVjB (|0b;D[n87ar=T ]#B%OFaΔ?sTf[0>$R!{B2X76sj?Ȍ8J*%Ӈw? `"mP!x]T$$ed񢂦m`Wy 2UNvm y?_|ʹ} Ur'Shr9Su0L޽5fNg]KZ$ԝԒ%LRG nxGS~9NK MQ† zvlC bRעwrn4͵ym})iu1$';rL:0 Y >mf`!eF|>_< F7ϛQ~=֎B,OwԳ(iO]8ebbut>|1߫]LZ>he :q_pєp[rdX 3|NY3 :挀!W ({u串UdQ:tީK'[FQ$D` ΂X)z Uz^~A*Up)P^J\i֕m=]н @lSW0EMluXXFp.3LYpv 7?I.ڒ'z8v5aqEc.nZ,iȎ4?68#/~}BEҎ>tǂ5z;thUԫs,~~/`(0\?ڪ{⻦3B' 9$uрޱT`z{EY! y T63^yv tV - _` 2aڔЪjA{> ,ϪSǶES-> ]9V9 `L 0oc;3f i+L O,H*:Ă՜p%)JnSO-HX;>g:fU?7'>T@D 1y MTٯnJj5A؅aH˔Fv 7j;ū-ՎL wMH„vT$2ᯬ]H0:.zt2s.hߺ0}⼝o32@fMM,@Ky N?*2yUA`R7p+@0C>xLSY/hޟɅ]P>Xƭ<H{M\)IE+Tobd%/T 3q$&$K>5A){:3ŴL&@7+O@'Twh؉WRYx䓘&NНJzVN.9^zj{GSqyF oE{mm:j[CXggs 2(AócnxH=4C&a)[91B3%hAS+`#h p,@vD*I/+V "睌xfQ ` ecOXe9b.1#Qû;ti 4C@R[.SkoZu⛥\[0P˚Ӵݩy>XL)M3PETvd6J 9~O 1 <&V*4,hw59UoªExPh IiX~PKNS~'~611QgKAc^7?CSþ~&= ھ׎NK*z\z[GLׇ\ cmxḱew? }wmp9"a^f޲2frOZD6H࿲vq 5Y௳.܋eU4@O#=ԕ^%^oFd<-Z0ȷ˥7S%t̔e1=#"HkqF/j ZÕsZ7gb rC|⻶9XӾmVu&?R g^SbQuR[UG)H])IjMf9ˬImG |:{̤`}|ބd,0oduM{0ۼzbu ,UEq1aGv 5 P^Y|N`&Gs֘]c3[$~/z?l?},]JrwKuR,3kxKpzeAci9[{JQ;HA]\J,埀LS$6hLY.(7:\!2 P~w˒?-s@JVy?8/l:)8qHL$pC5VڻplOp {zI섯V3ǀeiu 쳢:ɴM ;wf#(k8Zh?;DΚE_ZX@, `HuSn}0*fz>Ix<只0=AN{>ђBfo,4/Sa鋳_ 1a7S $9ggWcnu:v.z :5׈Ynia`q C}w`B霭hr{o ,hV.?(v=u|v1mD;]u} K9ZQEϡ쑔|mpfr*С!aH-8PI-mY 9y-6Z98m fKXW\u)%e89@>a][KHwFvh$m#(I2 t }&Yl#k#TD'x:O'A 8Y18y8%r}J ^0je+=*޷2Kzon[U)ő<8+2v`Db{#xzWߐ*lx&[u]rVVSo{$Y$gp1mԕ-xvY9c#?@ܨ POT~?qU`L x<_Syׁ[pf(3Hđ~Rɛu9G8Pj" _#pD L#jǞI|&+(YΒ;V Axyк4noQS,6H`I,hxXQcb/)7n̡"̢̙;+̞rg4POjd;YTc!'6Vј~vxp}?E\V7Vn(؟Mِџ55?V0˄WJdO aI!׍yo8V(0]4Ia{i7jIbe{k>E}Lݠ l40Iu/gQ"_TqF$ǟQFuGT2F9ن^!ؤo)v%%ml1|.^Udeszؤ5vZi:_X/æ4I.up׾)Ua %d %[ʩIYa_h(\ΐXLq_: ͞ q_O4q,?z.11&ŝQTtx | Dib<3ph8)q`hѼn"ԁG5)Du&[c1JsN򢌧+F׵a?4Ɲ^ATBg6 @Rһ>i fp aיRﶩ4TIyU{ɤ F0SmF&Eθ$r:AIW>%hܚmSGժ]OڣzޣPf m:D;TտS$ӌn6!(o|Neǝsy<Ȼ$Bǯu$Άdbe2۩7ƳAebAhӭqV' hcn]@98A ~yΑcc/Be,7io݁v-FMa5)aߩ3ZM*beS-^_[ʬo.S Y564fPy;Z?@J2 *eKP! "P?0d^gą؟Ry9xUvEG4ƥړ-u<0Õi ^]~}ĭoX `J75"y=R_u4SX#%#<^706$sdnpa~Ĝ4)K)vim_l BC֞AuY3q~t/$k\(XĚӿj4"4J}ohTV1e7PoT( !cfa]vzcuZݟ)b%uE-IVvfaB1 eeEHyN-']y71J;fK+%nn Nadw5^塊mRBo>~QVT4%0f)T`h/ /mɺ*V ⢅/%$:hx/νH]%L"̧~.h R- DqMPaS*i`&@^%t𔓐7eVVbA? 
@?,>?jܷil(\$eZVd fWN Uңg\ unS%[-x>/wj,o^ |BfPCؗG1BmQY;ɶ۪04ZU:؊s=S^_+l~_wIJi1{F(%qCL HNĬ$kf |k=K*>*9 ]-Oִ84'wSmGS~N+)<N0_d ˩Jw7N6&c1LC[{"Uu@^b뾤`嫻!n6vHw`ZX^O{" bD4C4Xi*ɩ%Fe"dY^>Ql}i.fU굧R ⵒG걪4NNSs61R赺*So t]ŐS7C,4*֭yD#uR꼝Yc"^0sj2xWZ\C3pn+/A )<*[e r{lŭw">,|n c4v7?HZҙ7Hj ?VVę̂[?9aYD00fF[Z!Sި [f~Rna*%p*gHd̲#Xixn79Ҵ ,_=}5!Rm3r&"zgMo9\|+g0B):\VvB,ib-|7;h:- BZ& 1*s4dD(%KR[\pUJ7ݯu\Mg!K3cЄCW~mS%єɡ.++VQ.DTB/ƚ6ʥ E*[;z0w)WSz}UAc@onj 9X5ƈ7J)RnTafo-AzHFet-DwVO1r m b2& ĩ<3 ߒͱf`+ "g RL87xFn6텗oSΧ11]q$N U,Kx YbbK~ Z7Ryi4cs[!cRۿfa "g~ js=Kic'N_|Ih[ykkR~G,@,'Wa;X%@ ĵhJaI񐦂2#$&zS[+ ]%\`:iKU@ 4!_tߌ(ߛdV@JsFXB>bRɸr>g- ܢf`#` uU&YkP)KobNFiLX8vqϩv2go=U-i1Z$ 1 7xR4$×}:$".Yw ($y9.Ub'x;'j&ӵ8!̥]K=mJZW;aXp[=< ]“XЖfGS!%lYR`MEFg2~cy@S,t%6O4&Dy:.Οl/F-Ql["t-ukMZLT#"3ŗOdi "5 I[לXN6a0|)>ї?,, ٤QId 7 =;c~s&=uD^Ug˻y"HwyWjbѵִ=7' !vKRĈx3YU꺱5cʮcz"f ݣ9Gсh_\`ˌjwR˫K}IQ\єMI}- TJwju71Fio-Uj^kp ݷ0(yfrw{m0It.fIO&ڣY9>JhFwlY^ RݧfT:'9bWS`t_;e+dm;s^Ut.-f.! }E㦕ʘxŲ*15s:`"$]x"T"_tO2zzsʪjpȕ^L‘r6 .`0^78+&Qg#l`"ǐAf jJ6D?C#ۏ5'{h*$SMrs_Vp2{UtqVG7q)Q=PG~d J,c#c/j $v'293 cW[6I2)49zl<]eBOuV#Ab2oހ7x~zT=ufuiBE fX$Ɓ u=pF?Ɍ^0#3.`%(UwD0=mKbeg+QFIV}) oy}m#s+cQg@_a0Ғ;PT볾 ON*_6/hd"2MbqZ]0t6&^ZPPnX!؝uXKg.8 tR8>8M [LBdãle\  ?&հo4S+f2/y8@57zi{ "(IE)/VEGD/V ᚋC$37N8)Ȅ!;1R}dudW*v(>7c /|`/0p o672Oݽ>Ce- I }C8 “.Y\݁:s~\3Ceo* XQRw7Z1^6p³K71 \Ёd$X'jډwK0gLk<7;؝gJ!CТs%05y[TW`+h#x>1%u&S}POX<ZBFw,WlT8kѶ{_%JxVjAF?M 0ޢ$rj+N; "rQ/Ou}8AoF`ս$B"lRڏ@yOvi.'ŮAp2ZQ7+ǁ-֥yB+J(?i]DU|V' E*a)'@.sOŅzooF!,Kwɕ 3Igu]Lb<`]9f@8gR TcrzLf:9^is}iM'ϟI4ݍ*J0ff jд ?hכ4 y׸OGm;L#[޳뒬mO͢DBFlhG%HKm17arSM?K}F^eT׳4P۹ sEi ̩xXe Êydt=ڈd0< 8(p묋 99> 8 OWQ"q O❹E25.$VHP((F0J%2+唓/dz#=gVL W<%p0٩`P}i$Uhdu韢fu޳*84V7Qr'W=6wwkv"'#}gn2Z%֪cԄM6ϬyJ5*f/gGT!If_}>L O"DغP}\8,j;X?Н{P,s!t~qT]5!8{f"1H'P K ݩG,!&2 ,c%FebssSVmȵr xռf?h+Y{d\bW\~-7OTw-aua21g~ԯJjW99Wj9D7&(4}{XT06"Ijׄ/Np+GV59LXqjRg|T!3&Ȣ}}^݃,VϙԐ*31hCn׽W[Mh"/SZV43,~4C|n> WdB6GEH!u3`@S\|mq 2w -f9,)̈Ph]p4 *ډT9.Vݬg" BG$.&+ ܴ1s0pWF?pB{LIOfG3T6JhTMN%0j(8Gn|Cce9F*HYT*;ކ|p59鰃H[sH8-N=&WɈWC xr$]9iʎ;y5$PӨ:C=,\6ɂ4Қ&9d_>Yi ~6Rbߏ38,R(65$.c_r&9K۲Cpu*1)zWE& *l&Œ[Z:iHg)E8ϔ8ݑ4Ma|,dcȘM_w()VQ<_>Kjst9e4uC0пH,[ΚF6$ <;9.uNy_\$qۦ NI$oGoiY dh*N߳etDV]7F /FJ!LARwX&vFj5xb&%2{n +HE'cHآ,TGL8Ԣ䮸J~{`hQ۠'G<ƣD iv'S5`)z?TS[8g<¼d2DT*VP_g0 kBJS B}rTGQ)q  rodWoQZAf-f XwbFkV[_$Rk$1U,#S<ߢ?r8"$[5&Z S ԩ.ّujo 1C= w4u4"d[ [m2+2YK=xeIw̋kuضN/p_etOHv.F"oe2+w@v)!Ӿ+*C|#I䞀~Oi>Y]]ۈ@(_yX_Bj3B#^gTgϠA*>0-X~4y;$M$>8g??,]6I;!T-O , [ > kATP;24uYmLle&@#%dW:Ww,6DIJ-:wU_DGrDG%Qd.TCd-&TKt+D3?VT{+ǔ,WVy{`DhGspocb.}$t$f ;xja3)YOkg7[*Zǎ_]GCbb~R vNmv:U=j*j9nV UdVbkxD߳6y@~ qX0&jZt)" Dc.jhR%@?saHK˙Ev(s[y[{&bF@pM P W654[UevoPj@f+W> P[LKr,y*nw߿sA4ٰeuT.x)|y0!.C-qd NX#)JCϝպ%AM*}M[,Wʇ[!DK/Xip͓/=Jkϣrh~\VͲWi} Grۧaﵯ],q~n,,NSIɆjTS ;pg8R9A gސHX@m^1Fm9Ͼ 4/kD,4p33W1%eH7D "n\X(˶e qf5cL2]dtfu:7ó y/vsj[ʚKW\]F|_B|mXi8×BPԄN?%shÄ: Qm.+)CO1xZ[Bm4bmw}*q/RPɋ b+5%zÜ$X`xf$ZSab-! 
R6b*4]̿H:zюe)K3(gv C7*d"GjǬS sC:'FJpW!8Le=B g:/)JEm%>힃[:Yt09w ؏", \( 3Vbl@z1i#13> Fn }='$?e$TI߶>wʽ- QO0A2sؠC8ǑH9˸RR @>6&Yٵ~h6pO .b;d%t师@=xhS@QOjJV}Āz_5.M\G="Uc;98η`"$䚦nR\/vɍЎ304a.ҿIT=I0R3U!exy48<S8TI&,W\FY/|+0Vpr/k%g0 5qMʖ]Ž ;{Xki!1^qOvPЫ+:a#gGj:ZNНr\%"&3A ̹"1!T\;{GAHpAR<]q{|2"lX<0LOFrP@are!uxu"ݸ*uU}'HQA̶{sCжm;aj3o> $<~| V>`ᐜѲFh:gJXf!2F ~Q!LؘDhz n$ g*Q/=YjDSN~GH_}oژMXxVC`61B&P  24by<,[<(r ,II_ =ksm\(s?.6RVwP6I$3ak~ܯ_Pc7ķ |>^oeqC,r>9&ȢtyE\h[}U=V AI7$QُVYςn`J* .@ʞUZjêdPyIߩ{zxTlTpfgcw각e(bJ`jtF,~%wCj,oG X" yn8o데fLifڊlӷAg דx.,˳$)O|8|2~AR D@r0n>fzpli!?8O:qQ].^W 9*V;Q-{ã^侀sIY2"?10'NgݟS"C% +/av9{+, 'ے /g#d*?;WZcMMk>/!_toO.NO:Emt}@iVxՎo%ALyז(!6eꑾbG6 |^u: Wl@4X#=*|:8]02O}Oaj/B(be#͒ ӯ".ƨɍ!}|w<}BSA{5 fJ[`tM4i# 7\V[xkK&vDʪԸda+~S3ˆ=OEs}1дj4IOSokY lK6P1PBdza 63QRi|P8ӣƻ/7ӥ™'e~rb!AȄ~kRwƤ%;_IBYҝx\s[A4;ׅKr6PPͱ$f:HgLbCS ~JQanlը,DmمrFZWmޠgNx;#S4׾up3;G9|/8ijy4?^BHu:-̗]xf4w2}F3)P0KzAp!ZġCAK}/K `MfdA7B"=_&zPֶvu9,=xI2Q!aK o@SHsjm"?V pU\HTFZw}EFB 0[>}&zxAA|uD-i:_jIHe|Pa_NƜoWqm!  pA#wmL XM[N/WDX^e͚޷&Ïv5x+=qP<Ytjд7q8(&Y` ݫiJ{A3x$~K_-@i* LT ;~5촳# )]l|7=uSVcAapCvSWаn0ηFw ;*^nݞ៟2kIj$@`\j%/v͸Rg``/BbfNt-PQSæCn@X8> E*:T5J\ Z*>𒚓Fecûļ%,' `[9_^k,rZOLW3xtj3e] Z~lSg!UH3洡YV,rIid'9FⰒj=c`]&|hh+jrz؅8MN$M/9Ix.3 5ŗl) K7؊jBm:1ģx70Γǘb]T zxV P֕ u."B6[dOݫ+!ODTxH gm4Kj]s^}dC/׫cG=i[65ΫZ]2?έB-SuldZfF7bGE8sJ7Gw=ٔ)ǑsՈ3%ҏ-+x*XdKH3E^|-B㖼wݲ_nK}=|9Ȣ"ϿF c$Wl'Jov[(._;Qܳ>T䢷pg$[_U ꆼ ;[Հ=c0Y=9ߺ͠e ] VD (wBdw tiB{ws-=7HHɸG^\>@?A&BgHʊYmn<`dA b7}# z4j&Y(ۜ&l?͂ᣣlۇ+PQ됑].AY (@zmlԎ3 {r3c(uI_D]@.>=#wʿj E0-S47oz>?a0+ConQu]q$0PqQ#%)$pKF.*՝6Ύjo_5[\eo*G_7V5-ƭ*&`1-ToȲ{pPjfB21KL >NN% }1L+[+eB`?FAb{GocCvMXgZLrxm$0X+ί{N(ja#B>q 7SsA,iuQCo]}@&D_ k֟'}@Aqp]IT`R4Op4 7< gf|}.Xt57[b },~x.WN\֡OFʃ3\Z\j!? A3q\[s{"eJ,(H&Bbt>k:ĽβJ02Z3 xY#kֳC (%R2VhuTAuUӳ'D'*q @bB| tm-ӄc*z&(!pNQ?mҝ:eBhD$JӖ Ft?=3'r&*|>CBvNV O{Ncď] Y(O$^ua )7 n_*O.h|L-]?:)UڦIcDm17=G{rQ6FOyhVq0gf{~Ns&i!,B~:I\2KqlX< >k<*}9b44p(E܇a*@-;Ak-Q7K򱜇߂PZБC|Bm[) f u#BGޛ=(KuBЏ<;a63\[bަW0Ƭ1)0se2-ל44C(P/sT JOq={F _eY{gHxLfYϧY4- WWzr*A_6Vh$!wxHC{ Ly0IsU/C|qM$YtO['ÆrIr^ ߨꍴOP4 PT2qA1B%\/{r xsA+3T{E6 8W*\`Fz^w|s]0TZf&p}r.3%hH+khXm S;;y=tiUb1A0&7-WgGLIz٫Q)\m6mF6M3}j"(`} 5pڷk`";DPU3IPshWٍۼ, /7ZϤVJ,\ns]픭Ў|& ID-߸`V#tjJ.gB5Ia :5R`%W]B/[e^ޘTN%_v NKbT*wp-q{eNWjR" Ԋd )d;v@6RІm~'VV.gCsӠ 9Ydpa+C.۔d ZjڒD/s~\0vF0pn (zFj`SQӝAH aHR24= xЂ"mWywJX{M؂brv D!7T#C~O3W6Gnr`>"SPҳ?P%4Ƣwa!@ZuI!gA 5Ok PfZ9ng QgkIIA@1VP~"9$v̞H ĕ➛,I4ϬwTK̏XٙW5EżQc1eC87cQooO3G4#b)ksv'>P9Tʱ db8ǃR눌O̲%MՔꨀs9{%"rH@Cndyː/@\XvK4bYƹ9O?mͱZ7_E ZvGFC]DMi{)wL k'wkȎBzƒe! sO5jN]q #({.;%Y9mnƥJY,C~Ԏ(VK^+v8Wڃ0/ VE_<9J7ggȤZE<}mLY1L7^=*Ɍb`Zrof6:#eHEǙE>wX`-q#i佣%~˅$ͺ+fd1i&}ttva{CONB5_Z#f:̦}me6N_ P[ fNxq0N}cʩ XRtE$&M϶e!Aq #YC$ ?1q*GED mJ.N(\WG/'Hh7tȏ&UQw雘 JrK.ngcD7Gp-o{4?OM{udWR(Ĥ5ys#t \" #tskA3z5M`?i9|9-AڿkrN~=>#ȚMi&NvLDkGMS.Fذt ^;FK ؎I\ݵGF;ȌW&Kg̈́uGۼ%ۋnRSga>)79m.̴NAU_06xxkrywÉÞudCnnW̻;X]I~1U2IN`-Rf53:Z:ȯQr܍tOk< VFj+^Xn:"0߹T>RfS_Z cP Eɽ u^WaSj'[a;-IN8x9o3Aey)?́GBxy^? 
mFjp91_\zXzџR ^/ͻ\g " Y{hZ2a?j ^ߍR=:>><տ@~s_cę*b9 W7쩦3Hʷ;IFW=KѝpCf̾$٩;!8FGYI@3tt9 6 T rsi.?lmաT od=ްQ):etqC^hGf :s} BobH2oyƆxF~RHl*A/'3IԱ+BluU~J̏2gQ~]锽`vBƔdGE?mo}W/(ξSzeQ!L7Ze#nw Q|V)m;5|\ >N4J~*ӾblGxeJeD~(9ҽ+Ys,@ҥi 7Gljځ 5 y!5却X1 {q mB4jF#aq<˅4p*P8`'P=u %dXxk叨Yɯ<ݪŏSGu F9 r]iBjNϱ #}{*Y}`Է-J؜ 6 N[z1ۄ}|*YV~DAۗ\g€\ &_ۅHXZd8 }[F,(} ó ך7.¼]G'۝/g#TDRCnVmE$\- Zk5q &\q]*|-4_7yv V$Z1),~+~!j)t#hQ&7~%kf' ^MvӧnSޕa iDƣk S*(>%)<l0!j~N]ΐ BD8Nq8H0b{*=Ely07=Y[OѶV0>ԽY> ᳾Ԛ͢T6H ,QD\ OoTO~hx' doPPJ0p$C@Y?hnZZki4JhEC"S#{ތRrlzTQC1"^PP:PŽ0D*2l%*Jp ~Wno!"t!= ҟXm2.Xۢ@\65,6c=Q^ V!t~]>NҾM --)Ⱇa,H΅)&JWr}ܨIR:i`.{DC'z/G||>ߊXεJъ|$ulCW>Kt'˜xF^'ZZPJ 3Ůaō؏#v 0%>$ޫz@@ט/<9_J,uHXDlp&㞱h+l>yR`u2i-1cX?#3mݨ<WPk"pVKuv"-ՠ9F;38R> AF,d20ۀg^guLXW24OUnE²3fV&~C՛Iu߰x4K֭nCD[ER4zp1fVg*BBՅ4A*b/lKeb1 ÿĠw9`VdefE0!c|"I߹ e(N>x/Z.dhG fsj5'+Kf,jH/%R T3\jl @&ӜULW |믱eH{8S*AX_xnF&7x%-k+H7$t(I^)UH!o(x9/XjF @N* Ը61hwCP,N-$ܞaa5)R4#!Đ}$M_3.OvuPa L{Iql>Ld[ܑ& Rֲ.x)@FNM*TN[kdD1)~Z\1y|aSRH93ɷsleXWZUB-V>1!/ DE9[9>gI1 ~˳,@諗zAr|ຟc) (_%FAYhͬ@nj<6kDN`i A:<5is=R @Q瑌##^ dwp}dܣ5`=-O raIEܻ$.L )ńDǧU;(^-a5f Z# V8i9";I[Kբ AOhI[C+$o&u Ib8^sn8L g_4#`I&ql@»+֑r@cGcos!WrT짖Fښ)"'Y]UZH3mD\ʷX⹽@zիtص7qRjJ_Z"vqnt('vh|-PvtUIH Nu`XM4w/R/DJŽu ˆSDo-GvtjX[u#DX I6=Ut@& 8g F$K4 pɃ03#׽bt*AYNMf||gZʊ@7=Ƕ!jVg+RZZT$ƜVdebjK~3P\揪bT6.'{"uWJψyw`$ׯ9VJӽq9cڰ=JG{@(FǞk|!A`p] Э1 m:gSE+oGDS|}w? #w:%>¾ۣ諕XيaѤCYhJL̙`pؖ&~sI5ԮYJ0aoF^`˄􂥤(WCT Ku_ZJ9dw0m?HԼ^&O<49ÿJVrGc!n#w5dWY.c_= r:~x$axiIVQ-%A"*PAƫ%ep4q'^0Nkٗ)1o}?a0 WPsf|хò(47n/Z>QHC1(_{X3_@7PmVQ]S=(1VҐ+ |/tq)n5+uo j@I @hfl( 94>s5a-#V;s97#>їg}" 7co7.qi9xitċF9ϰ}R:'mbD2ES}zH[S7jaPbs>C=fbt Cŵ^q )ٹrS>px G9r(" w=U6|z[A_2dʗ[QqxgrX0;w 3׌L+:*8J @M_"0! ۼ%+hOejKIu"0h]$fdx "|4Jȋ|uT'~Q1"1ţꚻ|E&)7.mʹܻrFA=`KNa W!9t,_e>QX[fRU3idJ՞>{mm8Q-{ՖIeKJPybp;D丹j;=O'1|F%UXK0\D2-{i3F~Ȣvb_Ci63 I7Օ֤a&C=eOJ}: 45!g4 "̰ ӂDW%'ܒx~ƃKaCs?`Bʈ;~*ɂG1g\. f&z^6 ۅS6*c/˱V!xxW] wtTȆ&%ZbkdWmO~u$jiQ95ŧ"͈Pq/@.el Z ~feMbv; F%з|b8ϓɟ $)XJG:dmc=]p6S>[@mxpN1Vn 'P=7Jݐ?M4ȭ!mt)QW#p g KKA6\%'6ە2տDm?ΚDNѾcLF֤8ڰϊ"u2>&b;ͅPwk)oۿC(9:˕2C'[YiFv] y);6@ZEY+܆8@,/@igр.0<6pS*] :)>0>YXh(?pW(ȍZ g CXR_O*t]w K~ -{Kg9אU%Lb,S@Ub@gn 4SçZiyBgZIdZUزK40LgIK~hJ 0\(u{\p#5)gT 6HQ (&~_C엾 n}Q:}az}ƒbUPJ ox& `_J(zuB79ʬxGХҟKz+|Ϫ ->0Iy)?"2Ă! M\]X0(rjmW;Ai-8{P(!'8wT z2 I*#$+ȄVn G8 )+s rP?X38M2aaրڧ3|\7.cԳI"! ۾+ m{moկ(kb䮀崡(7 žպ KgR"HMQ~LNYM_3˂P^>>1|:G]%jd_;wGXN}DDQV/Iv {$_QʁsGH\&kYn% 2-2磏NO&jW\zdTn nҁķ ݍؕ1J9TW~N&fmv0w K2tJBVL./΁EȔ.WE҇AQL&}_{rbanmaQgm"G&ѕ8j]-\#Z@d|A4y ?,PȲW3y\mKwa)hc)1aE26T 8<ҘssoƇ쀎О~\y⠨'Z)Dt1$5_l.@Ze\.O@bMC.<윻޾1lgXE~PP#ѯ XK]S l6SiOq^  Pt~Vo՗jeOƫHqҒE0'$U)Og m7 nOC?Ac.y9 H/gC OfΆµ Gv "۶XbVhF+ vl:b䀪NLm&EEzWɄzbě^qY73x$qBLYJBf3˼Dž- G}o?% #_!.NΒ=''ۉ\I}]zW<ۀ!OuyV(0.xz"95tRPo8^q=a#A0%ZxsTqV$~Նi(?xO^34~IXgcg F9MV\a~ AȮ8S[ $HW% (ƴl[ifdکu.or?΂?DbksNi3 viIِ(<UQSW|^Њ˰5!ҦqrHF#C<2ۇ0a*P"ՊjeY8/p$nqS#< $opl,kK//X?mv]VW'.p[NSu! T(pvIL -B{20r tVn!œ2p>en.i=1UOx6Ut7=hCmϢQ2y]8\ c ) և,YgбkA¨zIe2SXmunJ ѫ+ؕ_f! HAir#zc1(Yi mg.E^ɞYPZk s@k<ߐ?S/t+uj]瀩;GpЉB;:@ZADיٔ<)}8UAbc^]T8&#Gc)0 NxQ5s>mypk9m33Y92 l@9[ƙƀ6NHXH&I=!sѬC[0n6Fxn6-̣Ώ@oԬY0pYE1׵X&P|1P9ʷ(W㉎ @0zJ:3$W"Q7xKN\9  JO`R{$%`S'cr nHk.E`x?D c$IUTVXFھA.*Afa0byV O*F O2v;IBEvřcUD+2%D˜ 2E y e50z MxO1/wqMi o)/]a!I;ڂz}TCw14lh"U0Qֶ 2>ni7 DQfꗫq.jih)Ey?rFOiLс'c\v]L_я=Lm|_ l3m UY|x#)?oFST#:փ'!\l}\>os~%Fbj,.v'{\z>a3t@|fq@T:(;6Xtz!)M@viE@[!y|SRWQ0l{Gp=ny'^MX ~$~coe.tU)ܰwH`f:q+ȊVHS;*{~RTyR]޻RB){ո^tiK^GU(rA@HY[.lKvZR+Ģ=Rje&JO mSyKڔ` K+% kvq-9~C4lD% oȥ)Xm!}XuE%6p%Gڍ=kY~.EϘR:WiIVP;^P;6@7gS/}Oo+g>O9QY(D ɩq ag։IQ,1D }Z& |0 (o|A)K qL_m9:.Q-vltSFqD1Eiނ{v(Ox9kv9{jמ̝f5 <%,ږh,J x&1Hǟqp̠Q_8DGɉYJNGŻ3lUD' ;n*VbPGQOՊyITQoN=M!dޯ */Z! MK[ F;XCX3AקH$:BMybf+dhD} {$=ٷ$5jwW|IƤ'字lMᶩw!JY[K\. 
QiҮ/ٖ}f[~5-xz(b-k'e͖GˆʿFY~x$y>q7̐Շn}l\I=} SU==5&Y3rSe̲]i7Qw(&N(W* qz^;Ρ <:{Q<*.b"s`m;,}aVȈ,^˄z۵|$nw[o2P O Tm%~>}[il=wBUQyzyOO"Zx_Q]ּ:HwST Bqg}g}@FDR׿}m j;z?Mf Η2|YZwxHycSsɩQbj j@RfMӨ?S4cyVyMꨲU`OOm^Ñ)Q ȿ$)jfݡm>1>gm+{pHen7˻L2jk3+:6V=*@( Ĥzbe5ԓGy MRD( IU3}-bGgwy.tQl!y?Y|oƲZѕgVn\[50SțECpiOgCQ ^^a>\JRhcDhtk'%,?I` NU#*. Yy̓C0+ BE1#3س2rL}`!WN{&ssއ0GϓJAѺe9'ɴ{nK/_ްlI^tc<2XVhO 1Ad!TdtYK< 0M,LCbu(5ɆBVPGGޞ#_$\zAlNΑ^gLNc-=d͐n0ITI wSOe">xaܳkQ9u:OavB =!8Mrj7cgex-.55@pC}9Ke> Cr sjP:J+A{&JxWt3 5BEl6DAU2aGoK0}7>Ot2M-a=<#u55:]MiO@[*Lfy- aܬCmvKË q2?^:vuPj:zƚ&7 =`mK}D霵[&){Ls?'u"BxTӆ.;87GKWf,Oy2Zά/d2*t=*5>eHD`|inХ !fpc ٶN OGیPBʿ~' u;ŜctPfCN8u7}嫆8>#2uk?DB|em膓tZx?/exڷhjt}2H0ԿiJH\0:AGwa Ol;D]Hri>,<ਙh- *V˩n~Te5qȿh7G"Y=23QA6 q0H3Am귞i3eD0nA@S)7y2u5Wr;upśJi^xA>%yX,U+?|x#;q&+V46Jy7r奒QAS7č;3s,7sS% I0U4dl5r-[ЮpЅ]AvS(jInt;}iRW jqD$96_ Y+/Q`%Qmxܾp90&h_$ɕ:E5dG/[G^`q I"H[}Y?cn5+oΎ+ZJ{S硅n'Nouv|L:;/Y7]/W$AX50U#vbd-隶L.5d6f%ج2Xkԥv>Fn 3驨2>BTj~G,aq R'H y$l-A4сJ)E>mKc/ZYDiYVۮu{AxJl,sEBt ~q,e577 QőqŠo7½jxGL-@߬~k~9 TPEV,D]PD5-͆,H B;;Kl\(^;BYUOwㆍ*I673̛gSJy24XwL`@C~Jne p#D"7j⶯dQkB,Ė>JʛȡćBj:sJu/e! }S6sS/ v 瀺ul]!M>R ̎SXWrhlbЄO73*b>t5ZW8d8Ĥ h0??/du9$! 阎?^5V 59 149M[4Yƻ"0WaCM*jt8|fA]mv4lJĊF{I2BMJʩ'ʟr(sƼ)LL*ՠPK ܏:<Tmg} aߜ]0ez9{q|wv}N \c4yC̨b{.JzZ-PX.8OTzt-a%D+ߣ[;Ą~*m'T5g2axuj6(j[PWy*F9ُo`1h]g}?@ݐ-#O(8T+~—dѧ!]XKPg=LFLfBE |_tnt 6[aR@*2Lf LN*閄NKJGxX\5P}_111DFuXƇzFcvs7 eǡ9~+zmA_9HD/OrG^$w%̭DcZeXTO7n:gav%Zprb绱WF SJ toVt^ϛ{KT7BqQ&?VLANБhIb7V9qaɖC`\W;ȑRTЮȍU`!y47>]B*ma#`ӝ!J9\1g8A>;y):BD-8"M]`S`0J@hJ!ֻ߯l- ci\R`V8&q P[2a&Ѻq*Mzط\'Zg}4=rz5m:yԻ*/4~m 2SE -hnLkۙ.F |M;Ѫ,,L7hlr>!ۿwlﰴGjQ&Ktr.D0n~|&dq|?¶01MϬCufI8ʵ5ST[震DꬵHil;#:E,l!2Pǥ󟪎wV;`U a$eH^&5zMB%Z$Q(%JS!X=fk|['zFZr 02 lB ~fEx ȗd|:',N,V$۬ t v%QÚ(rPf[i9 Q8Z1l͢Y:Qʦ`ȟ{m7j};p8^;ao;A+(Jjw,^um \WC5j\y drw$B~WS1jkHT]<8 AVJtszETρ{j߰tgސVRtD6љ ^J+!2%;J\eQF墍B)$7*%u7^mT,(v6 \ˁ";GMY\x5]Ƅx[۞1w-drW_4C߲}f}#Q"vaʠcz 9rqѭ迋K$HVۜNmlT-:;F'{zh|)pA|G[ZwYik_x/vk $?E]$EߋsvVLIu0Ns'-3[OdB+CMTL8!,45AoX"Jːg.0$h,@ uYXaISѝ,W>RJ$we^>pCXOBm=A:Ą pa̛58,VX%WKJ[;0B@{㬄U2YOӽH?r.%@h'| 5#昗KֈB"e;CBE#ks-,zUMtl%-3ݛk-SNfg$v6onbΤD&척|s UvMQҦ-6\hPzҨ2C_]g#?c'S2m+mQޢ{9; 1fBn.7YlqL×mଡM)Oat0?7&ƹ#`@e7{;Q e+;t){^C1]>Cw+_?'5XL@#?.'`ۼԻ'i&7!ayA˽0U/:qB_/x0c?@HO5m>kӣdtXpⅺ*{4, rrSӳd9?(_Bκ#f~#2j˵a*1NrvԼ_bh+ |%k\@^!]9fʯc%ut#qUF˜Wϋ,һc5}}7Nb ->?9o(&_0r!L9báUxIބ>q_-Z7REwhV{ q=^ZNXiP](r FTXɫ SI[\0_Bɫ1uĤANz24Z^(mg!|uqmF!ao돂oA-# 8Rٞ.@3 ~e!tzAL}CE(VCK_3 Qt9Y.h+-~zjO3fREKn&+CڻVy"%NJsnȓF2[`qi9;Hs(5D#sJf-A6 [{%>0Ii6إ'2I̻e3DR;tg9t{JxÇ1@8c2\1؄r2$b̹^:eʛ-EqΘZ0X.l׵lu9yɹjPr Yk•mcžg", Dt5e ).\jR&nGJ㯦s{Ess˒OCM4^`Rvw?ІQx4!wfBe+nG}m MF0M|seUyS N칊ʦ2׋SUA|#h0@:ܓ]_|{?!AXɥINEC%*O7 Ձ%D!}0WZ_Dm dYBk7YQcc%8A*Gu.%8W@ľة,}qIP9~V| NG|Q/p.wFQ2TMa[ce̼opsgU|B$n`]+jȩ⯐ 9>s$x)KZ"i"!"6@Tzj w* jN/zrZGO]WOTR^E; g6b:x@t>e/0ƶQ\b6e fd+R( $:^ձ>^)9@c_df%-0&xl!1C7ɂ2!Aܸuwt\!$.UnGLbOoJcyf&~гKzчM*NI 7QIܨDdY&PJp(6o![g/;Vü{' ZKONH"GNL7%i 4ș H N9ݮy'+ R ۟.I|8[jbb<+Σ8е})=ҍ6*k:~ ~)ۭ%vc:cVБ=\`GR٠0>-NC[ֆa=!gk0ݚ[l<$},$<" PlV7 8-Xc[$hζ@+ aԜtܟy[c - fp4t!=w F8hDX?r%T 2ca) dúntlFy:nBvGoz=*|P!GnH9W @eTބk!9,Ϫ]$ Ǒ[aKWu~p[58UU(ܹ*2qlD2x tht?Բ Y D *B?{cU L {퉆z8CMC!wbhr>/_R=3lȖmWr)&ta2>$]]YQ>3_f/jX (%XUٴc3S0\M4؈LY_@$$@!%l@ $FGrm-:G²-$#T2>o,|ZR<OI*|ף]1f,qlvHJf."!&Vn\GD>T|g۱Q`G:=U1VG܆l Bz 瑋 VO+q2ʛ;A܆rx9eT\,*=3"^]8CWb(i"dECQ|AaC+ص{Y@à&doALevOO?_Sl|ȥ?wapL@-Epi1O4(Q4Qgbx jLUCK0<{55O),$[bT-Eays "<Ⴃj˾6^کf{Xݶ.v= TĹ0G`n+d:e{o&mb!@à=Xz2oelw;7D0a64MzyvS~" b9\9#t9o~'<85rawdM%ߓ{W#DHO8q}rӃ9>S*uX*zfX Vf*@,/V[trY#AIl( ia}  z2R@xC_>8@VG|&gYCNe/:O4'ݬak.#s@I'_Vy p%&wiDL!nS!6j {BEMr5c/nr`^K<>P:tN(-6 %U{\ e`]RZJo :ۊWh[H59~s?`EJ/𐕫D|:Wr FLݻ+06lTfzMyzңB ] bDPVRtl37h+\fL.3雞P+kk@?1&K"@ @稯R \SfMyx(3Gu>Ư/lhE/f-P#","jj(C J[떷Y2yoZZmr1;)0ŸdEZ*{E=' 
MR:e۠QG^+E.zs1QDd&[RU:kRs@ 1Bb̹ESɖ5M=b>J`kLdO#ӟ!BJyIyE&5bψqy)FMrl(Z]t dRnY&q#s=aX<y@'I9s9J'B~UFXgm/ X o~BpƎṩwO ^{߮HQa;a_L) $5Ϸ ZRwQs% sg8j=#NOECWPtv[ ~;L1 FX )`S 00B)VBs}].}IPE@XMhq?hf`8ё$1Q6Q.E@0-H!]n'kI\FRWאߺ|X1r'Gu&~rw5hX> ɛ~4v.\AcG&?4NۛKDb:ާO$oxϮsOUg8-T , ;|-}'sG0=OUEN G _?0s(k(ECvj7KCa Z9iiʎ۴/!>e)'m2ʤ’>ZC7R昩ά @`aaV[ VW: ݂}yVQ{a_FۭV;(ݯiSϻ#U:r&13zE*):ʳ"XfLRק .-qI84cqi&RwjJmU?ǁ URo լbHA*q͙ZZƬjP z셕aG04nD{KLi 8%;$_[]tnJ:ɥV(J>rKn|G'vk)9ˉ}]A 4'S2:`B`ОFSGHZni,sihkbӜ5b'm#Uy- ;xj>"^Bgo7γ:0˪L )?:sϐ7=@MşO ] κȶ~I:kv2ݭVb^4UU,S1;95(Wy\ [2F+}^ey\6ځ 5m XC~K!U‘*t"sLUäI~d&.;r\_ZĎal=N =e)z`&dnsߑUEl*  x}(F_,7ĬGDPR)6(-e2r7KSݡ6 I$ `pBVJAæ ׹ioe1M#!:<#e'1\l&Sr PA'D~|Y:S9%1Si ^r[cLe9V3=i)7RGS1]3 uCJ;LS0ꑞ?00$_zl, t#idͶWȬs4#k#CjtE`A (ȃtwzio`p,sx7_SX0fX_ As|JtFv:z$ (a0YCQ蕛eu̬hN!klvf* 0dpsӼУ63?@(5k @`CXxj⭹yb H|C^]0f|yk?PaGE*pBɿ6& =вS9 JE6)cz|YNأF#TР09  s9H)ͫmh_̴@.,V-g?D*.M*ZL.RK5n\f%qdWjtcl3>v~.9>>摛,ڶC*ɠ޴@79n1zRf3q|`A}Fd*8Y+|TG wbNO_169咕{ 3(G>!c'33pdE/ *1QebA Yۜb7d.@2Bka!=yjm8!ߣԡɰX 5gmAp h;(ЊU7/\F 60#.+eԠ7 ߹ELo:K.I]$´tWRw0gLjj-/8BܯշF<8oBK&d8<5By-ߨӝTjǯ3P4cQ /9QQ sGUؓ5v!],׺!9I$फ़(&h DՔR 0nnRvDn/z}`D xJ/?!@F$F ,w 6"3&#ZR.CگizSWuQPq[z׀OxuC$6> F8!(?oD[$.T y M.᝹?}R 0MrA#oMLIx~ ӈ 2VpsX]V6}[zaN 0¢8=_SL HυtWB +9V84䨊@O0}CsIv@lZ[^ý96y7*炣k2 'ɒ%;G⿀ P k/juTT jvq ᱰVZ[ ]"=w!H5Ea yʩJ 2E&jQ[Z5pQ/|f8kMM+JnF$DV=NO%(1>bgxGlN@믄EuJ@ccTdm+:!U"UQM.~ [8v0J!f sF[hrd4ʸ2 k̰xq|;R"w|s5bߛ\e40rC;5#Ȣ|&&τFrFY{hf7oM羅cӋXb )?$BaGߥZ'dǣ] 1Pf,~lP N9Xi{Uo4j,9^aQGy ǑoORZ)_h\3oiIr)/9mTDx=*svKJ8T|LH YMB /|T)BR 698tLX_& m5FD'>_J)(0WfX򄗜`[_K;\V Fh4J1D_N3N/KIhLbJ=m]%%zԂ>+u&:4W~6{X2-fkՁvp˥"WGgѳCXC B+#uBcƟ0"S wkI,?x?W.nw}`ɵ]1X.Y5n^ zoB~4(NGm,]Tp)[\.Oq}`F3W\?!+(oAdlQ6ٝe.^[ڥf]&'w3?DMεòϻvt;G{L0.~F I[(Z. 䃌D#)=Vjs5@52Mϩc,Q T`zEխ.W`z|c4=Xįdx%veF,h:R1tyNƫ 6d-.<Vp/|ԯ C#zKuѷA/S3ao\J?jvxhE!feN"-\gK;ICä-'Cq@/u¹Rq{w2׺*)N^WQ w0߳)J:^iV?mTLU/Ov{ڋ0!}xo܈&Q.y[qNm!(e&y~7pc1n֥@wc,u4~_`,:Ggf!݅U'8SD'y%$Bu,ܣ) 1crSBEjs#{T ;srRa_UalWr7TB0GI|sUDa R)zK1Rv B 0i&sHՒ '{KZ 7]9ZiVPu_/R{\+l)n4QW _Xw%O~0 žڿv8X-ĴWm&8͕c{s  G`V7a19P 꾺l[q]4hw7Ui%Z c1\ uvbfEZ}xC/" ; t>dpnGu<{ehfBvNo6i+n8g9` 5:I$v e;'|D 21&gvLI?$SLY8[B!z >6]Z$~uK-2Zi(&Vf88F+uB rOŚhd}/!병D 0{' ~A=ai;_wt'ڃ|\ƤZ.SNyTqY"^,ܲnglեˁݨ؝sI77_UMFOB՟$,j91aʍX{B'm BCO[Oh* ; 4z7w~ dp _p{MQ_ UlA0le`W4, ҡy隰Y ClQ{K/ŹZ{"+O.v Xyb Tzgq ;d  YMIUx"ut u.$ k2R3A,^3vU0Ö(=vtE-Qapnt T [%AQH~o/Hi%6'2`7e͵P1ROԚՃa <V>f-Q Ek{E$A=j姥י,[Kejk{%1ůҷ(P4XoiF8eh-N2:\#"OH41#(>. 
2(m[7~Nxn ]9(:DYw!@LfL갚q0ҿuE "(\4C]e 3g,7[:32ԁe6`3ggĸ'tB.%qWHMv+ݛGeI&xCeQ۱}C ,oD66Z u;rsI.'`:^`zөzȒaK-IR󘠊%Id K|Tϱs:[ 11N ɣk9ʥb֯rLdТ; D;F5fZ"(Fn\z3"P*u֜uY7\q!mz$BGIG.-߰n5fYb-5M?t?4|> O*UlɴaY/\Hcwnv!yqF`x`n*v&(oJ94Rg >'4/#r_{L%h);O|i ~ )}8\7EwnhDzYltht nN4CU]!'א"uUTh{aU ڙ$Ӹׁ0j{ơ:>ywa1XfʡJ{3^ik+%ZH3;V78SmZXFFRA~YLex<eai4POˬ`Cs=hU(YWKR ܄lkt1 H&=Ƞ4nդ2U҄q1 锜"kIɄZ4 qԆZˉpWRO+bs\<Jdщyt4:FdH>J;cM)Q4l|Bv9>-CG3&ݫIQ?#{~GtSM\+-Yy7e!Aۢh6ۀ,jgztqH(w'6NFlx怸 6@O|ѬBg`_vJN5yMrxF mqJLp@QX3~kx#VGPV@يq #ðқc(Zix|}Hd@ՖaoRØ5$f c@Zѱeg"Dx ~SKjMIRd ιSe\92*$yxTs7^YMzo;{.Ok^Κ oHZ@pFB7-8>v8LqY?=gxpZ~_ 1{xܛF>a0fdEN=8Xu]UQD""[%?F, Mr1ӎ9dF-C+*| N.voh9dt?RPE<LqTOm$6 0$ݖO$Ƽw<,%ү^u 8Uq-< UEYֈjMBJƒpB*&b0lҨl`恝8=](YY]=YuQ,lxсSGe{Ij-3s6|˩Bj)TOzsм tVgPLXtqoRƙM^.0&kCzx_qxAMPjy9&K)|4w\< ~2Hm!pǐO~,"t/y'2 w 8mBnFO`\f=AzO֒iO AKD5%x&v OHX=3I7>EqZɨg>an_"SMGu1yZDj5oR+] UfSje7EDZxNOn|zY{ HVSIoHc}*fFeo9p"MB' QZƭ'}P $ZgBR\@%b٨M?J1w^*̰3B .MO;u1k'ߤZgfY*f夰M}e5Ck4SI`}kk+Ȱ ż.Haeٗ?@u${nmhώ5>2…J#߶bHP< ?0LjR+m{6_i^i~B?^^.gA %XO++4t?#oo/.bv2g%&yVmބD|TkqzZ:4eE ^\϶ҡlGs]YF 5> Ʀ)WoS5R DXLh YvsDs#x=ZmRv@9K @Z#S]R%&ٶmݻDڵ*6t{)?1 5دٓCROMzc)(~21P7 X,RRyjbgx(|5R7TGնۨsW6Dbm|DNل0:U/11] $l۰b{R9K9'1SgaC<%FjSrf l \u$Rg<QH.)\iuzm%kԴg۩ h6lhE/4˱X1 GbƢRT p։nѫTD9\v Ck>aeWb~XwdX*LEд 1زIҁ؈!?5b#ƵK..ϙO7m}4V'߸֦`^XC(¨Aҷ APŀ6;",I>Ï=ٿSIT9Ϡb AַWPZnLEbBІK`i*ƍ%l>uA\o.8ψز+(J-'#k|73tgGsG";=PjiiaCiQTPOg$ukC#V(i/cb+)$FLɅI")$Pݛ?| 6"a]#[=]MSb;$xNڊ"pݓ! C;w˽ QI<@1K-l6"vF6F<_l6@*l!Wdš؞2Wt1ZmE[\ ~dd#'B < |#~ -(kk0}26Hc=vj$ʂlUp}UXMrĒe ViѴnoܳ,dGT*Y~xiHI+QzW͇?k4.Ⱦj&pH3#Z2%*TU +_ wӽvEyO$=sbכp=5?W]YSs3ui72E]m#عˣ1Lw-9k^5H$gYAN̪w-9<7db'eIdjj\ |k@tEx$䐎͈dJؐV~c63pܳ PP!EN_L>@'iͨQS‚+.s&+Y*NjT{x z3*CAB1̨\jƮ^'IPRa.1Ch^SmGIhB61EQKQZm^Omva|a\FNKŽA3)'l&[a%/ G̞1?$bPrPU=6BXWmԵn`E 9nGkQ9(9W뾕T.m]tS7 d}J'g> 6{TK,헐u8~* tҡez7l" ;ײT>13^aX#bfx}1Օ¼NEOoekMrBE.{4Z5%[CִSm|.^T\P$i(PVs29(cjI!Wr3|ɔ}aUd >_(3U. U}g40]~'9.dUs@kon V]iG0%\2P!1*W$fnFpn!Ati1YR7mQ=WmPLsi09H?iT+qd~ gT:DP,Jw8jF4ҔQf[okGطa[M#%ſy9|V0;s8Ӵ>e\amNiU #,r Iy6a v*գQ/h3gPa0]¦/= mLMnPOh)VcfɣaMoi` COKV{E3e|_, `/<\"I=59U v?nP~G a׺Ƅ/7~~E$B'O^2M꺔Ukyvuڼ6.go5ᦽTᢷ.:ⵧ-JIѕ.cJZ=4\t H,s zK|PeBq[)heH/ :l pr>,k  zPĉߐGqdjErJG@HP9%I?KZ_4(Hy(mH@ݿΈQuGiW^xBIMԧmgQ79,GaTD!E>U͌V"&R[w;GmVK'3!-\mT HI#}&<nD>'L oi5͖@kvoBC'K-Vgrb Jxk"Bl3]׳bz<Roќjꅠ0fuG2Ump=>4qBseM)HGJt fz}CH]" F/2LˆVVP{]x6;H] 76rG^HU=}(:ھeCaܾgر7^AX8PŘ ; }QǡwG%6WqTs9I㇁ 5|WOa_#$BCĀțqY,ػ ji\3D XQ2h,(tVF<NhnJn(P,5u"Rm ajajeű Iayc%W^iQ~-/`ݕ;b ž Rd@7>Ux(VCШOU%fwhW8QEmʘ<#KBBQgGf7P tc;T|8ED2=sѹq_jZ4D.}yxˈLD,O&g;dG4ǻ'jۗ/][7{`B2TA=tlj&2B'R9@GaZխ ,j&ͯ>9Ѓ,R#xR_y_iblbWs2F r Wv8!|vl5_P7U={m<ƹCŖVP7\>:S'%{Mlf&01_aP@weCrҋMDaue:ՠ WET\shnj6x]u*qn~%nʂP}?, m\'IG-Ҥumzs:.˃m,VGZ Sv>ǦB:-F?:=3s*@Ъ#Kk͋WIĐSAk> &<2%_Oq"!F3_Q3sڤ*u!?< 2T).V/fzwxCCWzZYKS>K ؑ3k.$S^,`dHW fC0A^=-J@ ՕʀQ5's @zgDF>[T1M ]Ed&a].pd caoŸ3[fL2t!3=|E0]ݡPjQHTՅDA=5! ! 
kWÔ#+hk0ԭ+8v9@yVq.|ɎúRx$=Ibly"AlA t͐d OmW .Hr3CQbxfרN^C|+ *vh#Vm[D䓢G0l\GYtϻR%ʁ+^ZoS[(WH'""_seԍҸ11,-Fu @$9&RōV#8žurW6pDJp|W7i|L25L-WKbCo $UuBePzxmT<0+zHO*i#=?(O6N 2~'kO]_PneȅMW0h!2?o3)!C&ϯ5"xJIuOBےp[q&Cg.^3g~^;?rIB>u~"R5}XmWп|_,2ػVYC\' ;I2- Xy%LBTԏ- {dr$#BV[-J'!Na[VPP`ڜr,c 0JbOkhJ]sۨoZބzOQS'CyA_ pT,'A*{c_t^ YQ6?045G Nnk&Y:̞wp[Q0oCբ' @s7&<yݑ=,(ذwd&.:8!Ґ'uvܒc6s@-mIXlDeNkm1082t'~K$sE-w"NZR#BwuMs2 ^WG-`t76 țgt*$zi8w#wV_ĝoj<+.s ]M^u(.w +ť/UZ` 4v#L ]-[2 R1ڽ@urɷ|I LDvQK#Zg]F[H/QE.]L!@¼g*m[Irک ȋ@&(yPܿhwrb4|e= lnCMj3W==\hNz-g\2gӝc2i@-@*l/b+pҗ,k)E;Xs??if:'ڝkhrb/'PnL0ѣoɄ~J+/V^^ߗqz(m&#:CFi"^ڒB1Z_?(q4Xw<{v$YYO϶h[h b7{C >**X.\ }IWT[a8DHhYG<Ѡs ^?]Uɑ$(CXR7d.TZ4w?Z0Js2{B4FNEUAmZcI D sc7vGzMrz4}pTdQjM;}qo'{<Fښ}G?hKPA-,$[otd#봼lSz?V޺OD#nY^j쑹M=KaI R)9Q|_|0-We[tp\/6.k﫶 x6aÆ0r3gV4 POSԋ11-@;w4KS6 MF} p!RY,Φ0lu(g?ɓTn1UѤ-|` S IǀQAmUHrbZHn;>2cROB`77LwPc5l'0~ "n,qֻBCPZR Bn~E};/3:z⧘SU.확_f&QG|X}Of]J4 2>@tx]hw,/^Ly$[ ~EDg#7N塹M^}> DFu,lɆ5 uh,R?6g\#nuΦ\1^E`ѽ[Pb"MzU;r1T$O ^*W rfśSLl-$U@Ւo+s:t,ARnLAFtIʳMO 3(nvsP.Tzf{-}/z핋5EGu-Dݩ0Y׌ZeV~q<ٸXvf#a"p􅋺.ч4_ͰI%cB3ҳvwy:$+vZZ]tZE]YѼ)ޞ1< eQ5yJ >t|ixPzȔJyo8zH4}0ի\=^nJvXB#"x'K&^qd3o |.B_܄*)[lX9 j[k/ 7~Ůڐ?HyRPdhg Wp2 7냾FC=#dGۨldl)$Lf λ!hpp#t6嵟YQTeb9689RwTцi8 Ňg1l]|j"ljWi0|grp0eiPhL3r;F*sc&Jueߝ| |x,fXQ͟T9p B Sqe9 9=H Z. 2kl㏒h?G6#qnkETcix!S@S@݊|+,'=0Wx\b?w~;#ѭcι`Nyr+KU_YKS شb;A ̤ߋ=`PiR5*JjG E~Sav}O'Ng?Gg'6txvXec`遳GJx$М5RX 0Vd))À/ɄL2_`Om* W,aTmIQqTݙZ0<;m >I+}Do$MO[ڼZVHxWfia.4x;@rR᝝^/3Yp$ߎZk$x3 |*bmkߌtd*b'U+×/"x?5ʲEzV7hcED MaDӀCUT@!q0 ^Mx\"$/hfQpnX#2ಣSq>+MVD3d8ڶ/I2A`!t.KtL8I&@[A(!SաO@? `N!e_l;I #q(|bH?{QjV}Ut"=.PgC9 q1@'Ø*j΀{ݯ>PP?c`/'E5^7jr5ֵVZ,v h`;l%p,R C "SkXt8zJsM'r&3~.C!,^ aYb/N_OPVQzd.N_(O! #WGMO#z:~s+uYxz7LWAZUSvQ:²~I4Ԯ>Wgz)"m4$|=p\ƪx5O[_h:]3 )nU= Z]qOVP[F0/zKI45?f lrjn"W>~4!nѡ 2ZRxqIMGlaFU{I$M_ݛӂIcqIE٥S4j8eg# )pJjSa~ UU=3GW 4-{1Џ28w1Fr[)H !ڛ"-,~saֲC$GK&F~O}˔pu6~HzgF_Jw.č!šyvT!!{  c(- S67-B$ F#tU! AR aҏ3U* 9&5'{i3Z|=^;g0!;7uAtdU(yw!=F8ܣ[J@0S$(c.DlsP4`P>f'r]B81¦/}x]&L͔(Jǁ(Pz6Ջ!hi'-`)pG֤5bBX+ȜME(! 5cX܌_;`WK ]`m14 WlmK) bw[+ x fs4P!ڀq}X@|,9wX{bKsK"ӣF*S$ma3FmˆGʿ/Xo$zȫjcLF0?&zsR0~?x3E.-WALƺx+1&lZ/@Êɕ /! 
[binary payload of kernlab/inst/doc/kernlab.pdf omitted: compressed PDF streams, no recoverable text]
kernlab/R/0000755000176000001440000000000012560414652012126 5ustar ripleyusers
kernlab/R/kkmeans.R0000644000176000001440000004650711304023134013700 0ustar ripleyusers
## kernel kmeans function
## author: alexandros

setGeneric("kkmeans",function(x, ...) standardGeneric("kkmeans"))

setMethod("kkmeans", signature(x = "formula"),
          function(x, data = NULL, na.action = na.omit, ...)
          {
            mt <- terms(x, data = data)
            if(attr(mt, "response") > 0) stop("response not allowed in formula")
            attr(mt, "intercept") <- 0
            cl <- match.call()
            mf <- match.call(expand.dots = FALSE)
            mf$formula <- mf$x
            mf$... <- NULL
            mf[[1]] <- as.name("model.frame")
            mf <- eval(mf, parent.frame())
            na.act <- attr(mf, "na.action")
            x <- model.matrix(mt, mf)
            res <- kkmeans(x, ...)

            cl[[1]] <- as.name("kkmeans")
            if(!is.null(na.act))
              n.action(res) <- na.action

            return(res)
          })

setMethod("kkmeans", signature(x = "matrix"),
          function(x, centers, kernel = "rbfdot", kpar = "automatic",
                   alg = "kkmeans", p = 1, na.action = na.omit, ...)
{
  x <- na.action(x)
  rown <- rownames(x)
  x <- as.matrix(x)
  m <- nrow(x)

  if (missing(centers))
    stop("centers must be a number or a matrix")

  if (length(centers) == 1) {
    nc <- centers
    if (m < centers)
      stop("more cluster centers than data points.")
  }
  else
    nc <- dim(centers)[2]

  if(is.character(kernel)){
    kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot","matrix"))

    if(kernel == "matrix")
      if(dim(x)[1]==dim(x)[2])
        return(kkmeans(as.kernelMatrix(x), centers= centers))
      else
        stop(" kernel matrix not square!")

    if(is.character(kpar))
      if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot" || kernel == "besseldot" || kernel == "anovadot" || kernel == "splinedot" || kernel == "stringdot") && kpar == "automatic" )
      {
        cat(" Setting default kernel parameters ","\n")
        kpar <- list()
      }
  }

  if (!is.function(kernel))
    if (!is.list(kpar) && is.character(kpar) && (class(kernel) == "rbfkernel" || class(kernel) == "laplacekernel" || kernel == "laplacedot" || kernel == "rbfdot")){
      kp <- match.arg(kpar,"automatic")
      if(kp == "automatic")
        kpar <- list(sigma = mean(sigest(x,scaled=FALSE)[c(1,3)]))
      cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n")
    }

  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

  if(length(centers) == 1){
    suppressWarnings(vgr <- vgr2 <- split(sample(1:m,m),1:centers))
    ncenters <- centers
  }
  else {
    ncenters <- ns <- dim(centers)[1]
    dota <- rowSums(x*x)/2
    dotb <- rowSums(centers*centers)/2
    ktmp <- x %*% t(centers)
    for(i in 1:ns)
      ktmp[,i] <- ktmp[,i] - dota - rep(dotb[i],m)
    prts <- max.col(ktmp)
    vgr <- vgr2 <- lapply(1:ns, function(x) which(x==prts))
  }

  if(is.character(alg))
    alg <- match.arg(alg,c("kkmeans","kerninghan","normcut"))

  if(alg == "kkmeans")
    {
      p <- NULL
      D <- NULL
      D1 <- NULL
      w <- rep(1,m)
    }
  if(alg == "kerninghan")
    {
      p <- p
      D <- kernelMult(kernel, x, , rep(1,m))
      w <- rep(1,m)
      D1 <- NULL
    }
  if(alg == "normcut")
    {
      p <- p
      D1 <- 1
      w <- kernelMult(kernel, x, , rep(1,m))
    }

  ## initialize lower bound and distance matrix
  dismat <- lower <- matrix(0,m,ncenters)

  ## calculate diagonal
  kdiag <- rep(1,m)
  for (i in 1:m)
    kdiag[i] <- drop(kernel(x[i,],x[i,]))

  ## initialize center-newcenter distance vector and second sum vector
  secsum <- dc <- rep(1,ncenters)
  mindis <- rep(0,m)
  cind <- 1:ncenters

  for (i in 1:ncenters) {
    ## compute second sum eq. 1
    secsum[i] <- sum(affinMult(kernel, x[vgr[[i]],,drop=FALSE],,w[vgr[[i]]], p, D, D1) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2

    ## calculate initial distance matrix and lower bounds
    lower[,i] <- dismat[,i] <- - 2 * affinMult(kernel,x,x[vgr[[i]],,drop=FALSE], w[vgr[[i]]], p, D, D1)/sum(w[vgr[[i]]]) + secsum[i] + kdiag
  }

  cluserm <- max.col(-dismat)
  for(i in 1:ncenters)
    vgr2[[i]] <- which(cluserm==i)

  while(1){
    for (z in 1:ncenters)
      dc[z] <- -2*sum(affinMult(kernel, x[vgr2[[z]],,drop=FALSE], x[vgr[[z]],,drop=FALSE], w[vgr[[z]]], p, D, D1)*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(affinMult(kernel, x[vgr[[z]],,drop=FALSE], ,w[vgr[[z]]], p, D, D1) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(affinMult(kernel, x[vgr2[[z]],,drop=FALSE], ,w[vgr2[[z]]], p, D, D1) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2

    ## assign new cluster indexes
    vgr <- vgr2

    if(sum(abs(dc)) < 1e-15)
      break

    for (u in 1:ncenters){
      ## compare already calculated distances of every point to the intra-center
      ## distance to determine if it is necessary to compute the distance at this
      ## point; we create an index of the points for which to compute the distance
      if(u > 1)
        compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0
      else
        compin <- rep(TRUE,m)

      ## compute second sum eq. 1
      secsum[u] <- sum(affinMult(kernel, x[vgr[[u]],,drop=FALSE], ,w[vgr[[u]]], p, D, D1) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2

      ## compute distance matrix and lower bounds
      lower[compin,u] <- dismat[compin,u] <- - 2 * affinMult(kernel,x[compin,],x[vgr[[u]],,drop=FALSE], w[vgr[[u]]], p, D, D1)/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin]
    }

    ## calculate new cluster indexes
    cluserm <- max.col(-dismat)
    for(i in 1:ncenters)
      vgr2[[i]] <- which(cluserm==i)
  }

  cluster <- max.col(-dismat)
  size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))}))
  cent <- matrix(unlist(lapply(1:ncenters, ll <- function(l){colMeans(x[which(cluster==l),])})), ncol=dim(x)[2], byrow=TRUE)
  withss <- unlist(lapply(1:ncenters, ll <- function(l){sum((x[which(cluster==l),] - cent[l,])^2)}))
  names(cluster) <- rown

  return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= kernel))
})

## kernel Matrix interface
setMethod("kkmeans",signature(x="kernelMatrix"),function(x, centers, ...)
{
  m <- nrow(x)

  if (missing(centers))
    stop("centers must be a number or a matrix")

  if (length(centers) == 1) {
    nc <- centers
    if (m < centers)
      stop("more cluster centers than data points.")
  }
  else
    nc <- dim(centers)[2]

  if(length(centers) == 1){
    suppressWarnings(vgr <- vgr2 <- split(sample(1:m,m),1:centers))
    ncenters <- centers
  }
  else
    ncenters <- dim(centers)[1]

  ## initialize lower bound and distance matrix
  dismat <- lower <- matrix(0,m,ncenters)

  ## diagonal
  kdiag <- diag(x)

  ## weights (should be adapted for future versions !!)
  w <- rep(1,m)

  ## initialize center-newcenter distance vector and second sum vector
  secsum <- dc <- rep(1,ncenters)
  mindis <- rep(0,m)
  cind <- 1:ncenters

  for (i in 1:ncenters) {
    ## compute second sum eq.
1 secsum[i] <- sum(drop(crossprod(x[vgr[[i]],vgr[[i]],drop=FALSE],w[vgr[[i]]])) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * x[,vgr[[i]],drop=FALSE]%*%w[vgr[[i]]]/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum((x[vgr2[[z]],vgr[[z]],drop=FALSE] %*% w[vgr[[z]]])*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(drop(crossprod(x[vgr[[z]],vgr[[z]],drop=FALSE],w[vgr[[z]]])) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(drop(crossprod(x[vgr2[[z]],vgr2[[z]],drop=FALSE],w[vgr2[[z]]])) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc))<1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every point to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 1 secsum[u] <- sum(drop(crossprod(x[vgr[[u]],vgr[[u]],drop=FALSE],w[vgr[[u]]])) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * (x[which(compin),vgr[[u]],drop=FALSE] %*% w[vgr[[u]]])/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= "Kernel matrix used")) }) ## List interface setMethod("kkmeans",signature(x="list"),function(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), alg ="kkmeans", p = 1, na.action = na.omit, ...) { x <- na.action(x) m <- length(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else ncenters <- dim(centers)[1] if(is.character(alg)) alg <- match.arg(alg,c("kkmeans","kerninghan", "normcut")) if(alg == "kkmeans") { p <- NULL D <- NULL D1 <- NULL w <- rep(1,m) } if(alg=="kerninghan") { p <- p D <- kernelMult(kernel,x, , rep(1,m)) w <- rep(1,m) D1 <- NULL } if(alg=="normcut") { p <- p D1 <- 1 w <- kernelMult(kernel,x, , rep(1,m)) } ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## calculate diagonal kdiag <- rep(1,m) for (i in 1:m) kdiag[i] <- drop(kernel(x[[i]],x[[i]])) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 
1 secsum[i] <- sum(affinMult(kernel, x[vgr[[i]]],,w[vgr[[i]]], p , D, D1) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * affinMult(kernel,x,x[vgr[[i]]], w[vgr[[i]]], p ,D, D1)/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum(affinMult(kernel, x[vgr2[[z]]], x[vgr[[z]]], w[vgr[[z]]], p, D, D1)*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(affinMult(kernel, x[vgr[[z]]], ,w[vgr[[z]]], p, D, D1) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(affinMult(kernel, x[vgr2[[z]]], ,w[vgr2[[z]]], p, D, D1) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc))<1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every poit to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 1 secsum[u] <- sum(affinMult(kernel, x[vgr[[u]]], ,w[vgr[[u]]], p, D, D1) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * affinMult(kernel,x[compin,],x[vgr[[u]]], w[vgr[[u]]], p , D, D1)/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= kernel)) }) setGeneric("affinMult",function(kernel, x, y = NULL, z, p, D, D1, blocksize = 256) standardGeneric("affinMult")) affinMult.rbfkernel <- function(kernel, x, y=NULL, z, p, D, D1,blocksize = 256) { if(is.null(p)&is.null(D)&is.null(D1)) res <- kernelMult(kernel,x,y,z) else{ if(!is.matrix(y)&&!is.null(y)) stop("y must be a matrix") if(!is.matrix(z)&&!is.vector(z)) stop("z must be a matrix or a vector") sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y) & is.null(D1)) { if(is.vector(z)) { if(!length(z) == n) stop("vector z length must be equal to x rows") z <- matrix(z,n,1) } if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n))))%*%z - z[lowerl:upperl,]*(1-p) lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n))))%*%z- z[lowerl:upperl,]*(1-p) } if(is.matrix(y) & is.null(D1)) { n2 <- dim(y)[1] if(is.vector(z)) { if(!length(z) == n2) stop("vector z length must be equal to y rows") z <- matrix(z,n2,1) } if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { 
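## The loop below processes x in `blocksize`-row blocks: each iteration
## forms one block of the RBF kernel matrix on the fly via
## exp(sigma * (2 * x_block %*% t(y) - |y|^2 - |x_block|^2)),
## multiplies it with z, and applies the (1 - p) and D affinity
## corrections, so the full n x n2 kernel matrix is never held in memory.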
dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize if(upperl < n2) res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2))))%*%z-z[lowerl:upperl,]*(1-p) - z[lowerl:upperl,]*D[lowerl:upperl] if(upperl >n2 & lowerl n2 & n>=n2){ res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z res[lowerl:n2,] <- res[lowerl:n2,] - z[lowerl:n2,]*(1-p) - z[lowerl:n2,]*D[lowerl:n2] } else res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z } } if (is.null(y) & !is.null(D1)) { if(is.vector(z)) { if(!length(z) == n) stop("vector z length must be equal to x rows") z <- matrix(z,n,1) } if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize tmp <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)))) D1 <- 1/colSums(tmp) res[lowerl:upperl,] <- D1*tmp%*%diag(D1)%*%z - z[lowerl:upperl,]*(1-D1) lowerl <- upperl + 1 } } if(lowerl <= n){ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)))) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z- z[lowerl:upperl,]*(1-D1) } } if(is.matrix(y) &!is.null(D1)) { n2 <- dim(y)[1] if(is.vector(z)) { if(!length(z) == n2) stop("vector z length must be equal to y rows") z <- matrix(z,n2,1) } if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) ones <- rep(1,blocksize) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize if(upperl < n2) tmp <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:upperl,] <- D1*tmp%*%diag(D1)%*%z-z[lowerl:upperl,]*(1-D1) if(upperl >n2 & lowerl n2 & n>=n2){ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(y) -rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z res[lowerl:n2,] <- res[lowerl:n2,] - z[lowerl:n2,]*(1-D1) } else{ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(y) -rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z } } } } return(res) } setMethod("affinMult",signature(kernel="kernel", x="matrix"),affinMult.rbfkernel) kernlab/R/gausspr.R0000644000176000001440000003567112560414652013751 0ustar ripleyusers## Gaussian Processes implementation. Laplace approximation for classification. ## author : alexandros karatzoglou setGeneric("gausspr", function(x, ...) standardGeneric("gausspr")) setMethod("gausspr",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... 
<- NULL m$formula <- m$x m$x <- NULL m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- gausspr(x, y, scaled = scaled, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("gausspr",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- gausspr(x, ...) ret }) setMethod("gausspr",signature(x="matrix"), function (x, y, scaled = TRUE, type = NULL, kernel = "rbfdot", kpar = "automatic", var = 1, variance.model = FALSE, tol = 0.0005, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { ## should become an option reduced <- FALSE ## subsetting and na-handling for matrices ret <- new("gausspr") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] if (is.numeric(y)&&(type(ret)!="classification")) { y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) } tmpsc <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale) } } if (var < 10^-3) stop("Noise variance parameter var has to be greater than 10^-3") # in case of classification: transform factors into integers if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot")) if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") 
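## Overview (added summary comment): for classification, gausspr fits one
## binary Gaussian process per pair of classes (one-vs-one) and locates the
## posterior mode of the Laplace approximation by Newton iterations:
## f = K %*% alpha, grad = -y / (1 + exp(y * f)), with Hessian weights
## hess = exp(y * f) / (1 + exp(y * f))^2. Each Newton step requires solving
## a linear system built from K and hess, which solveiter() further below
## approximates with a preconditioned biconjugate-gradient iteration instead
## of a dense solve().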
p <- 0 if (type(ret) == "classification") { indexes <- lapply(1:nclass(ret), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) xd <- matrix(0,(li+lj),dim(x)[2]) xdi <- 1:(li+lj) <= li xd[xdi,rep(TRUE,dim(x)[2])] <- x[indexes[[i]],] xd[xdi == FALSE,rep(TRUE,dim(x)[2])] <- x[indexes[[j]],] if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) yd <- c(rep(1,li),rep(-1,lj)) else yd <- c(rep(-1,li),rep(1,lj)) if(reduced == FALSE){ K <- kernelMatrix(kernel,xd) gradnorm <- 1 alphag <- solut <- rep(0,li+lj) while (gradnorm > tol) { f <- crossprod(K,alphag) grad <- -yd/(1 + exp(yd*f)) hess <- exp(yd*f) hess <- hess / ((1 + hess)^2) ## We use solveiter instead of solve to speed up things ## A <- t(t(K)*as.vector(hess)) ## diag(A) <- diag(A) + 1 ## alphag <- alphag - solve(A,(grad + alphag)) solut <- solveiter(K, hess, (grad + alphag), solut) alphag <- alphag - solut gradnorm <- sqrt(sum((grad + alphag)^2)) } } else if (reduced ==TRUE) { yind <- t(matrix(unique(yd),2,length(yd))) ymat <- matrix(0, length(yd), 2) ymat[yind==yd] <- 1 ##Z <- csi(xd, ymat, kernel = kernel, rank = dim(yd)[1]) ##Z <- Z[sort(pivots(Z),index.return = TRUE)$ix, ,drop=FALSE] Z <- inchol(xd, kernel = kernel) gradnorm <- 1 alphag <- rep(0,li+lj) m1 <- dim(Z)[1] n1 <- dim(Z)[2] Ksub <- diag(rep(1,n1)) while (gradnorm > tol) { f <- drop(Z%*%crossprod(Z,alphag)) f[which(f>20)] <- 20 grad <- -yd/(1 + exp(yd*f)) hess <- exp(yd*f) hess <- as.vector(hess / ((1 + hess)^2)) alphag <- alphag - (- Z %*%solve(Ksub + (t(Z)*hess)%*%Z) %*% (t(Z)*hess))%*%(grad + alphag) + (grad + alphag) gradnorm <- sqrt(sum((grad + alphag)^2)) } } alpha(ret)[[p]] <- alphag alphaindex(ret)[[p]] <- c(indexes[[i]],indexes[[j]]) } } } if (type(ret) == "regression") { K <- kernelMatrix(kernel,x) if(variance.model) { sol <- solve(K + diag(rep(var, length = m))) rm(K) alpha(ret) <- sol%*%y } else alpha(ret) <- solve(K + diag(rep(var, length = m))) %*% y } kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x if(variance.model) sol(ret) <- sol fitted(ret) <- if (fit) predict(ret, x) else NA if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression"){ if (!is.null(scaling(ret)$y.scale)) fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" error(ret) <- drop(crossprod(fitted(ret) - y)/m) } } if(any(scaled)) scaling(ret) <- tmpsc cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- gausspr(x[cind,], y[cind], scaled = FALSE, type=type(ret),kernel=kernel,var = var, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- gausspr(x[cind,],y[cind],type=type(ret),scaled = FALSE, kernel=kernel,var = var,tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) if (!is.null(scaling(ret)$y.scale)) scal <- scaling(ret)$y.scale$"scaled:scale" cerror <- drop((scal^2)*crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("predict", signature(object = "gausspr"), function (object, newdata, type = 
"response", coupler = "minpair") { sc <- 0 type <- match.arg(type,c("response","probabilities","votes", "variance", "sdeviation")) if (missing(newdata) && type!="response") return(fitted(object)) else if(missing(newdata)) { newdata <- xmatrix(object) sc <- 1 } ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") if (is.list(scaling(object)) && sc != 1) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale" ) p <- 0 if(type == "response") { if(type(object)=="classification") { predres <- 1:newnrows votematrix <- matrix(0,nclass(object),nrows) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[p]],],alpha(object)[[p]]) votematrix[i,ret>0] <- votematrix[i,ret>0] + 1 votematrix[j,ret<0] <- votematrix[j,ret<0] + 1 } } predres <- sapply(predres, function(x) which.max(votematrix[,x])) } } if(type == "probabilities") { if(type(object)=="classification") { binprob <- matrix(0, newnrows, nclass(object)*(nclass(object) - 1)/2) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 binprob[,p] <- 1/(1+exp(-kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[p]],],alpha(object)[[p]]))) } } ## multiprob <- sapply(1:newnrows, function(x) couple(binprob[x ,],coupler = coupler)) multiprob <- couple(binprob, coupler = coupler) } } if(type(object) == "regression") { if (type == "variance"||type == "sdeviation") { Ktest <- kernelMatrix(kernelf(object),xmatrix(object), newdata) predres <- diag(kernelMatrix(kernelf(object),newdata) - t(Ktest) %*% sol(object) %*% Ktest) if (type== "sdeviation") predres <- sqrt(predres) if (!is.null(scaling(object)$y.scale)) predres <- predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center" } else { predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) if (!is.null(scaling(object)$y.scale)) predres <- predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center" } } if (is.character(lev(object))) { ##classification & probabilities : return probabilitie matrix if(type == "probabilities") { colnames(multiprob) <- lev(object) return(multiprob) } ##classification & type response: return factors if(type == "response") return(factor (lev(object)[predres], levels = lev(object))) ##classification & votes : return votematrix if(type == "votes") return(votematrix) } else ##else: return raw values return(predres) }) setMethod("show","gausspr", function(object){ cat("Gaussian Processes object of class \"gausspr\"","\n") cat(paste("Problem type:", type(object),"\n")) cat("\n") show(kernelf(object)) cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n")) if(!is.null(fitted(object))) cat(paste("Train error :", round(error(object),9),"\n")) ##train error & loss if(cross(object)!=-1) cat("Cross validation error :",round(cross(object),9),"\n") }) solveiter <- function(B,noiseproc,b,x,itmax = 50,tol = 10e-4 
,verbose = FALSE) { ## ---------------------------- ## Preconditioned Biconjugate Gradient method ## solves linear system Ax <- b for general A ## ------------------------------------------ ## x : initial guess ## itmax : max # iterations ## iterates while mean(abs(Ax-b)) > tol ## ## Simplified form of Numerical Recipes: linbcg ## ## The preconditioned matrix is set to inv(diag(A)) ## A defined through A <- I + N*B diagA <- matrix(1,dim(B)[1],1) + colSums(B)+ diag(B)*(noiseproc-1) ## diags of A cont <- 0 iter <- 0 r <- .Amul2(x,B,noiseproc) r <- b - r rr <- r znrm <- 1 bnrm <- sqrt(sum((b)^2)) z <- r/diagA err <- sqrt(sum((.Amul2(x,B,noiseproc) - b)^2))/bnrm while (iter <= itmax){ iter <- iter + 1 zm1nrm <- znrm zz <- rr/diagA bknum<- drop(crossprod(z,rr)) if (iter == 1) { p <- z pp <- zz } else { bk <- bknum/bkden p <- bk*p + z pp <- bk*pp + zz } bkden <- bknum z <- .Amul2(p,B,noiseproc) akden <- drop(crossprod(z,pp)) ak <- bknum/akden zz <- .Amul2T(pp,B,noiseproc) x <- x + ak*p r <- r - ak*z rr <- rr - ak*zz z <- r/diagA znrm <- 1 err <- mean(abs(r)) if (err tol && counter < maxiter ) { ## Aggressively allocate memory if(counter %% BLOCKSIZE == 0) { Tktmp <- matrix(0, m, dim(Tk)[2] + BLOCKSIZE) Tktmp[1:m > 0, 1:(dim(Tk)[2] + BLOCKSIZE) <= dim(Tk)[2]] <- Tk Tk <- Tktmp Ttmp <- matrix(0, dim(T)[1]+BLOCKSIZE, BLOCKSIZE+counter) ind <- 1:(dim(T)[1]+BLOCKSIZE) <= dim(T)[1] ind2 <- 1:(BLOCKSIZE + counter) <= counter Ttmp[ind , ind2] <- T Ttmp[ind == FALSE, ind2 == FALSE] <- diag(1, BLOCKSIZE) T <- Ttmp padded.veck.tmp <- matrix(0,dim(padded.veck)[1]+BLOCKSIZE) padded.veck.tmp[1:(dim(padded.veck)[1]+BLOCKSIZE) <= dim(padded.veck)[1]] <- padded.veck padded.veck <- padded.veck.tmp pivots.tmp <- matrix(0, dim(pivots)[1]+BLOCKSIZE) pivots.tmp[1:(dim(pivots)[1] + BLOCKSIZE)<= dim(pivots)[1]] <- pivots pivots <- pivots.tmp maxresiduals.tmp <- matrix(0,dim(maxresiduals)[1]+BLOCKSIZE) maxresiduals.tmp[1:(dim(maxresiduals)[1]+BLOCKSIZE) <= dim(maxresiduals)[1]] <- maxresiduals maxresiduals <- maxresiduals.tmp if(counter == 0) t <- rep(0,BLOCKSIZE) else t <- rep(0,length(t)+BLOCKSIZE) } veck <- kernelFast(kernel, x, x[index, ,drop=FALSE],dota) if (counter == 0) { ## No need to compute t here tau <- sqrt(veck[index]) ## Update T T[1, 1] <- tau ## Compute the update for Tk update <- veck/tau } else { padded.veck[1:counter] <- veck[pivots[1:counter]] ## First compute t ## t <- t(crossprod(padded.veck,backsolve(T,diag(1,nrow=dim(T)[1])))) ## cat("T: ",dim(T), " p:",length(padded.veck),",\n") t[1:counter] <- backsolve(T, k=counter, padded.veck, transpose = TRUE) ## Now compute tau tau <- as.vector(sqrt(veck[index] - crossprod(t))) ## Update T T[1:counter, counter+1] <- t[1:counter] T[counter + 1, counter + 1] <- tau ## Compute the update for Tk update <- (1/tau) * (veck - Tk %*% t) } ## Update Tk Tk[,counter + 1] <- update ## Update diagonal residuals diag.residues <- diag.residues - update^2 ## Update pivots pivots[counter + 1] <- index ## Monitor residuals maxresiduals[counter + 1] <- residue ## Choose next candidate residue <- max( diag.residues ) index <- which.max(diag.residues) ## Update counter counter <- counter + 1 ## Report progress to the user if(counter%%blocksize == 0 && (verbose == TRUE)) cat("counter = ",counter," ", "residue = ", residue, "\n") } ## Throw away extra columns which we might have added Tk <- Tk[, 1:counter] pivots <- pivots[1:counter] maxresiduals <- maxresiduals[1:counter] return(new("inchol",.Data=Tk,pivots=pivots,diagresidues = diag.residues, maxresiduals = maxresiduals)) }) 
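## Illustrative usage sketch (added example, not part of the original package
## source; `sigma = 0.1` and the `iris` data shipped with R are arbitrary
## choices). The factor Z returned by inchol() above approximates the full
## kernel matrix, K ~ Z %*% t(Z), up to the residual tolerance:
data(iris)
datamatrix <- as.matrix(iris[, -5])
rbf <- rbfdot(sigma = 0.1)
Z <- inchol(datamatrix, kernel = rbf)
dim(Z)       # as many rows as data points, as many columns as pivots used
pivots(Z)    # pivot ordering chosen by the greedy decomposition
max(abs(kernelMatrix(rbf, datamatrix) - Z %*% t(Z)))   # small residual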
kernlab/R/csi.R0000644000176000001440000003653711304023134013027 0ustar ripleyusers## 15.09.2005 alexandros setGeneric("csi", function(x, y, kernel="rbfdot",kpar=list(sigma=0.1), rank, centering = TRUE, kappa =0.99 ,delta = 40 ,tol = 1e-4) standardGeneric("csi")) setMethod("csi",signature(x="matrix"), function(x, y, kernel="rbfdot",kpar=list(sigma=0.1), rank, centering = TRUE, kappa =0.99 ,delta = 40 ,tol = 1e-5) { ## G,P,Q,R,error1,error2,error,predicted.gain,true.gain ## INPUT ## x : data ## y : target vector n x d ## m : maximal rank ## kappa : trade-off between approximation of K and prediction of y (suggested: .99) ## centering : 1 if centering, 0 otherwise (suggested: 1) ## delta : number of columns of cholesky performed in advance (suggested: 40) ## tol : minimum gain at iteration (suggested: 1e-4) ## OUTPUT ## G : Cholesky decomposition -> K(P,P) is approximated by G*G' ## P : permutation matrix ## Q,R : QR decomposition of G (or center(G) if centering) ## error1 : tr(K-G*G')/tr(K) at each step of the decomposition ## error2 : ||y-Q*Q'*y||.F^2 / ||y||.F^2 at each step of the decomposition ## predicted.gain : predicted gain before adding each column ## true.gain : actual gain after adding each column n <- dim(x)[1] d <- dim(y)[2] if(n != dim(y)[1]) stop("Labels y and data x dont match") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") m <- rank ## make sure rank is smaller than n m <- min(n-2,m) G <- matrix(0,n,min(m+delta,n)) ## Cholesky factor diagK <- rep(drop(kernel(x[1,],x[1,])),n) P <- 1:n ## pivots Q <- matrix(0,n,min(m+delta,n)) ## Q part of the QR decomposition R <- matrix(0,min(m+delta,n),min(m+delta,n)) ## R part of the QR decomposition traceK <- sum(diagK) lambda <- (1-kappa)/traceK if (centering) y <- y - (1/n) * t(matrix(colSums(y),d,n)) sumy2 <- sum(y^2) mu <- kappa/sumy2 error1 <- traceK error2 <- sumy2 predictedgain <- truegain <- rep(0,min(m+delta,n)) k <- 0 # current index of the Cholesky decomposition kadv <- 0 # current index of the look ahead steps Dadv <- diagK D <- diagK ## makes sure that delta is smaller than n - 2 delta <- min(delta,n - 2) ## approximation cost cached quantities A1 <- matrix(0,n,1) A2 <- matrix(0,n,1) A3 <- matrix(0,n,1) GTG <- matrix(0,m+delta,m+delta) QTy <- matrix(0,m+delta,d) QTyyTQ <- matrix(0,m+delta,m+delta) ## first performs delta steps of Cholesky and QR decomposition if(delta > 0) for (i in 1:delta) { kadv <- kadv + 1 ## select best index diagmax <- Dadv[kadv] jast <- 1 for (j in 1:(n-kadv+1)) { if (Dadv[j+kadv-1] > diagmax/0.99){ diagmax <- Dadv[j+kadv-1] jast <- j } } if (diagmax < 1e-12){ kadv <- kadv - 1 ## all pivots are too close to zero, stops break ## this can only happen if the matrix has rank less than delta } else{ jast <- jast + kadv-1 ## permute indices P[c(kadv,jast)] <- P[c(jast,kadv)] Dadv[c(kadv, jast)] <- Dadv[c(jast, kadv)] D[c(kadv, jast)] <- D[c(jast, kadv)] A1[c(kadv, jast)] <- A1[c(jast, kadv)] G[c(kadv, jast),1:kadv-1] <- G[c(jast,kadv),1:kadv-1] Q[c(kadv, jast),1:kadv-1] <- Q[c(jast, kadv),1:kadv-1] ## compute new Cholesky column G[kadv,kadv] <- Dadv[kadv] G[kadv,kadv] <- sqrt(G[kadv,kadv]) newKcol <- kernelMatrix(kernel, x[P[(kadv+1):n],,drop = FALSE],x[P[kadv],,drop=FALSE]) G[(kadv+1):n,kadv]<- (1/G[kadv,kadv])*(newKcol - G[(kadv+1):n,1:kadv-1,drop=FALSE] %*% t(G[kadv,1:kadv-1,drop=FALSE])) ## update diagonal Dadv[(kadv+1):n] <- Dadv[(kadv+1):n] - 
G[(kadv+1):n,kadv]^2 Dadv[kadv] <- 0 ## performs QR if (centering) Gcol <- G[,kadv,drop=FALSE] - (1/n) * matrix(sum(G[,kadv]),n,1) else Gcol <- G[,kadv, drop=FALSE] R[1:kadv-1,kadv] <- crossprod(Q[,1:kadv-1, drop=FALSE], Gcol) Q[,kadv] <- Gcol - Q[,1:kadv-1,drop=FALSE] %*% R[1:kadv-1,kadv,drop=FALSE] R[kadv,kadv] <- sqrt(sum(Q[,kadv]^2)) Q[,kadv] <- Q[,kadv]/drop(R[kadv,kadv]) ## update cached quantities if (centering) GTG[1:kadv,kadv] <- crossprod(G[,1:kadv], G[,kadv]) else GTG[1:kadv,kadv] <- crossprod(R[1:kadv,1:kadv], R[1:kadv,kadv]) GTG[kadv,1:kadv] <- t(GTG[1:kadv,kadv]) QTy[kadv,] <- crossprod(Q[,kadv], y[P,,drop = FALSE]) QTyyTQ[kadv,1:kadv] <- QTy[kadv,,drop=FALSE] %*% t(QTy[1:kadv,,drop=FALSE]) QTyyTQ[1:kadv,kadv] <- t(QTyyTQ[kadv,1:kadv]) ## update costs A1[kadv:n] <- A1[kadv:n] + GTG[kadv,kadv] * G[kadv:n,kadv]^2 A1[kadv:n] <- A1[kadv:n] + 2 * G[kadv:n,kadv] *(G[kadv:n,1:kadv-1] %*% GTG[1:kadv-1,kadv,drop=FALSE]) } } ## compute remaining costs for all indices A2 <- rowSums(( G[,1:kadv,drop=FALSE] %*% crossprod(R[1:kadv,1:kadv], QTy[1:kadv,,drop=FALSE]))^2) A3 <- rowSums((G[,1:kadv,drop=FALSE] %*% t(R[1:kadv,1:kadv]))^2) ## start main loop while (k < m){ k <- k +1 ## compute the gains in approximation for all remaining indices dJK <- matrix(0,(n-k+1),1) for (i in 1:(n-k+1)) { kast <- k+i-1 if (D[kast] < 1e-12) dJK[i] <- -1e100 ## this column is already generated by already ## selected columns -> cannot be selected else { dJK[i] <- A1[kast] if (kast > kadv) ## add eta dJK[i] <- dJK[i] + D[kast]^2 - (D[kast] - Dadv[kast])^2 dJK[i] <- dJK[i] / D[kast] } } dJy <- matrix(0,n-k+1,1) if (kadv > k){ for (i in 1:(n-k+1)) { kast <- k+i-1 if (A3[kast] < 1e-12) dJy[i] <- 0 else dJy[i] <- A2[kast] / A3[kast] } } ## select the best column dJ <- lambda * dJK + mu * dJy diagmax <- -1 jast <- 0 for (j in 1:(n-k+1)) { if (D[j+k-1] > 1e-12) if (dJ[j] > diagmax/0.9){ diagmax <- dJ[j] jast <- j } } if (jast==0) { ## no more good indices, exit k <- k-1 break } jast <- jast + k - 1 predictedgain[k] <- diagmax ## performs one cholesky + QR step: ## if new pivot not already selected, use pivot ## otherwise, select new look ahead index that maximize Dadv if (jast > kadv){ newpivot <- jast jast <- kadv + 1 } else{ a <- 1e-12 b <- 0 for (j in 1:(n-kadv)) { if (Dadv[j+kadv] > a/0.99){ a <- Dadv[j+kadv] b <- j+kadv } } if (b==0) newpivot <- 0 else newpivot <- b } if (newpivot > 0){ ## performs steps kadv <- kadv + 1 ## permute P[c(kadv, newpivot)] <- P[c(newpivot, kadv)] Dadv[c(kadv, newpivot)] <- Dadv[c(newpivot, kadv)] D[c(kadv, newpivot)] <- D[c(newpivot, kadv)] A1[c(kadv, newpivot)] <- A1[c(newpivot, kadv)] A2[c(kadv, newpivot)] <- A2[c(newpivot, kadv)] A3[c(kadv, newpivot)] <- A3[c(newpivot, kadv)] G[c(kadv, newpivot),1:kadv-1] <- G[c(newpivot, kadv),1:kadv-1] Q[c(kadv, newpivot),1:kadv-1] <- Q[ c(newpivot, kadv),1:kadv-1] ## compute new Cholesky column G[kadv,kadv] <- Dadv[kadv] G[kadv,kadv] <- sqrt(G[kadv,kadv]) newKcol <- kernelMatrix(kernel,x[P[(kadv+1):n],,drop=FALSE],x[P[kadv],,drop=FALSE]) G[(kadv+1):n,kadv] <- 1/G[kadv,kadv]*( newKcol - G[(kadv+1):n,1:kadv-1,drop=FALSE]%*%t(G[kadv,1:kadv-1,drop=FALSE])) ## update diagonal Dadv[(kadv+1):n] <- Dadv[(kadv+1):n] - G[(kadv+1):n,kadv]^2 Dadv[kadv] <- 0 ## performs QR if (centering) Gcol <- G[,kadv,drop=FALSE] - 1/n * matrix(sum(G[,kadv]),n,1 ) else Gcol <- G[,kadv,drop=FALSE] R[1:kadv-1,kadv] <- crossprod(Q[,1:kadv-1], Gcol) Q[,kadv] <- Gcol - Q[,1:kadv-1, drop=FALSE] %*% R[1:kadv-1,kadv, drop=FALSE] R[kadv,kadv] <- sum(abs(Q[,kadv])^2)^(1/2) Q[,kadv] 
<- Q[,kadv] / drop(R[kadv,kadv]) ## update the cached quantities if (centering) GTG[k:kadv,kadv] <- crossprod(G[,k:kadv], G[,kadv]) else GTG[k:kadv,kadv] <- crossprod(R[1:kadv,k:kadv], R[1:kadv,kadv]) GTG[kadv,k:kadv] <- t(GTG[k:kadv,kadv]) QTy[kadv,] <- crossprod(Q[,kadv], y[P,,drop =FALSE]) QTyyTQ[kadv,k:kadv] <- QTy[kadv,,drop = FALSE] %*% t(QTy[k:kadv,,drop = FALSE]) QTyyTQ[k:kadv,kadv] <- t(QTyyTQ[kadv,k:kadv]) ## update costs A1[kadv:n] <- A1[kadv:n] + GTG[kadv,kadv] * G[kadv:n,kadv]^2 A1[kadv:n] <- A1[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1,drop = FALSE] %*% GTG[k:kadv-1,kadv,drop=FALSE]) A3[kadv:n] <- A3[kadv:n] + G[kadv:n,kadv]^2 * sum(R[k:kadv,kadv]^2) temp <- crossprod(R[k:kadv,kadv,drop = FALSE], R[k:kadv,k:kadv-1,drop = FALSE]) A3[kadv:n] <- A3[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1] %*% t(temp)) temp <- crossprod(R[k:kadv,kadv,drop = FALSE], QTyyTQ[k:kadv,k:kadv,drop = FALSE]) temp1 <- temp %*% R[k:kadv,kadv,drop = FALSE] A2[kadv:n] <- A2[kadv:n] + G[kadv:n,kadv,drop = FALSE]^2 %*% temp1 temp2 <- temp %*% R[k:kadv,k:kadv-1] A2[kadv:n] <- A2[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1,drop=FALSE] %*% t(temp2)) } ## permute pivots in the Cholesky and QR decomposition between p,q p <- k q <- jast if (p < q){ ## store some quantities Gbef <- G[,p:q] Gbeftotal <- G[,k:kadv] GTGbef <- GTG[p:q,p:q] QTyyTQbef <- QTyyTQ[p:q,k:kadv] Rbef <- R[p:q,p:q] Rbeftotal <- R[k:kadv,k:kadv] tempG <- diag(1,q-p+1,q-p+1) tempQ <- diag(1,q-p+1,q-p+1) for (s in seq(q-1,p,-1)) { ## permute indices P[c(s, s+1)] <- P[c(s+1, s)] Dadv[c(s, s+1)] <- Dadv[c(s+1, s)] D[c(s, s+1)] <- D[c(s+1, s)] A1[c(s, s+1)] <- A1[c(s+1, s)] A2[c(s, s+1)] <- A2[c(s+1, s)] A3[c(s, s+1)] <- A3[c(s+1, s)] G[c(s, s+1),1:kadv] <- G[c(s+1,s), 1:kadv] Gbef[c(s, s+1),] <- Gbef[c(s+1, s),] Gbeftotal[c(s, s+1),] <- Gbeftotal[c(s+1, s),] Q[c(s, s+1),1:kadv] <- Q[c(s+1, s) ,1:kadv] ## update decompositions res <- .qr2(t(G[s:(s+1),s:(s+1)])) Q1 <- res$Q R1 <- res$R G[,s:(s+1)] <- G[,s:(s+1)] %*% Q1 G[s,(s+1)] <- 0 R[1:kadv,s:(s+1)] <- R[1:kadv,s:(s+1)] %*% Q1 res <- .qr2(R[s:(s+1),s:(s+1)]) Q2 <- res$Q R2 <- res$R R[s:(s+1),1:kadv] <- crossprod(Q2, R[s:(s+1),1:kadv]) Q[,s:(s+1)] <- Q[,s:(s+1)] %*% Q2 R[s+1,s] <- 0 ## update relevant quantities if( k <= (s-1) && s+2 <= kadv) nonchanged <- c(k:(s-1), (s+2):kadv) if( k <= (s-1) && s+2 > kadv) nonchanged <- k:(s-1) if( k > (s-1) && s+2 <= kadv) nonchanged <- (s+2):kadv GTG[nonchanged,s:(s+1)] <- GTG[nonchanged,s:(s+1)] %*% Q1 GTG[s:(s+1),nonchanged] <- t(GTG[nonchanged,s:(s+1)]) GTG[s:(s+1),s:(s+1)] <- crossprod(Q1, GTG[s:(s+1),s:(s+1)] %*% Q1) QTy[s:(s+1),] <- crossprod(Q2, QTy[s:(s+1),]) QTyyTQ[nonchanged,s:(s+1)] <- QTyyTQ[nonchanged,s:(s+1)] %*% Q2 QTyyTQ[s:(s+1),nonchanged] <- t(QTyyTQ[nonchanged,s:(s+1)]) QTyyTQ[s:(s+1),s:(s+1)] <- crossprod(Q2, QTyyTQ[s:(s+1),s:(s+1)] %*% Q2) tempG[,(s-p+1):(s-p+2)] <- tempG[,(s-p+1):(s-p+2)] %*% Q1 tempQ[,(s-p+1):(s-p+2)] <- tempQ[,(s-p+1):(s-p+2)] %*% Q2 } ## update costs tempG <- tempG[,1] tempGG <- GTGbef %*% tempG A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (Gbef[k:n,] %*% tempGG) # between p and q -> different if(k > (p-1) ) kmin <- 0 else kmin <- k:(p-1) if((q+1) > kadv) qmin <- 0 else qmin <- (q+1):kadv A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,kmin,drop=FALSE] %*% GTG[kmin,k,drop=FALSE]) # below p A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,qmin,drop=FALSE] %*% GTG[qmin,k,drop=FALSE]) # above q tempQ <- tempQ[,1] temp <- G[k:n,qmin,drop=FALSE] %*% t(R[k,qmin,drop=FALSE]) temp <- temp + G[k:n,kmin,drop=FALSE] %*% 
t(R[k,kmin,drop=FALSE])
        temp <- temp + Gbef[k:n,] %*% crossprod(Rbef, tempQ)
        A3[k:n] <- A3[k:n] - temp^2
        A2[k:n] <- A2[k:n] + temp^2 * QTyyTQ[k,k]
        temp2 <- crossprod(tempQ,QTyyTQbef) %*% Rbeftotal
        A2[k:n] <- A2[k:n] - 2 * temp * (Gbeftotal[k:n,,drop=FALSE] %*% t(temp2))
      }
      else {
        ## update costs
        A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,k:kadv,drop=FALSE] %*% GTG[k:kadv,k,drop=FALSE])
        A3[k:n] <- A3[k:n] - (G[k:n,k:kadv,drop=FALSE] %*% t(R[k,k:kadv,drop=FALSE]))^2
        temp <- G[k:n,k:kadv,drop=FALSE] %*% t(R[k,k:kadv,drop=FALSE])
        A2[k:n] <- A2[k:n] + (temp^2) * QTyyTQ[k,k]
        temp2 <- QTyyTQ[k,k:kadv,drop=FALSE] %*% R[k:kadv,k:kadv,drop=FALSE]
        A2[k:n] <- A2[k:n] - 2 * temp * (G[k:n,k:kadv,drop=FALSE] %*% t(temp2))
      }

      ## update diagonal and other quantities (A1,B1)
      D[(k+1):n] <- D[(k+1):n] - G[(k+1):n,k]^2
      D[k] <- 0
      A1[k:n] <- A1[k:n] + GTG[k,k] * (G[k:n,k]^2)

      ## compute errors and true gains
      temp2 <- crossprod(Q[,k], y[P,])
      temp2 <- sum(temp2^2)
      temp1 <- sum(G[,k]^2)
      truegain[k] <- temp1 * lambda + temp2 * mu
      error1[k+1] <- error1[k] - temp1
      error2[k+1] <- error2[k] - temp2

      if (truegain[k] < tol)
        break
    }

    ## reduce dimensions of decomposition
    G <- G[,1:k,drop=FALSE]
    Q <- Q[,1:k,drop=FALSE]
    R <- R[1:k,1:k,drop=FALSE]

    ## compute and normalize errors
    error <- lambda * error1 + mu * error2
    error1 <- error1 / traceK
    error2 <- error2 / sumy2

    repivot <- sort(P, index.return = TRUE)$ix

    return(new("csi", .Data=G[repivot,,drop=FALSE], Q = Q[repivot,,drop=FALSE], R = R, pivots=repivot, diagresidues = error1, maxresiduals = error2, truegain = truegain, predgain = predictedgain))
  })

## I guess we can replace this with qr()
.qr2 <- function(M)
{
  ## QR decomposition for 2x2 matrices
  Q <- matrix(0,2,2)
  R <- matrix(0,2,2)
  x <- sqrt(M[1,1]^2 + M[2,1]^2)
  R[1,1] <- x
  Q[,1] <- M[,1]/x
  R[1,2] <- crossprod(Q[,1], M[,2])
  Q[,2] <- M[,2] - R[1,2] * Q[,1]
  R[2,2] <- sum(abs(Q[,2])^2)^(1/2)
  Q[,2] <- Q[,2] / R[2,2]
  return(list(Q=Q,R=R))
}
kernlab/R/lssvm.R0000644000176000001440000005505412560414652013416 0ustar ripleyusers
## reduced least squares support vector machines
## author : alexandros

setGeneric("lssvm", function(x, ...) standardGeneric("lssvm"))

setMethod("lssvm", signature(x="formula"),
          function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){
            cl <- match.call()
            m <- match.call(expand.dots = FALSE)
            if (is.matrix(eval(m$data, parent.frame())))
              m$data <- as.data.frame(data)
            m$... <- NULL
            m$formula <- m$x
            m$x <- NULL
            m$scaled <- NULL
            m[[1]] <- as.name("model.frame")
            m <- eval(m, parent.frame())
            Terms <- attr(m, "terms")
            attr(Terms, "intercept") <- 0  ## no intercept
            x <- model.matrix(Terms, m)
            y <- model.extract(m, "response")

            if (length(scaled) == 1)
              scaled <- rep(scaled, ncol(x))
            if (any(scaled)) {
              remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))),
                                 which(!scaled)))
              scaled <- !attr(x, "assign") %in% remove
            }

            ret <- lssvm(x, y, scaled = scaled, ...)
            kcall(ret) <- cl
            attr(Terms,"intercept") <- 0  ## no intercept
            terms(ret) <- Terms
            if (!is.null(attr(m, "na.action")))
              n.action(ret) <- attr(m, "na.action")
            return(ret)
          })

setMethod("lssvm", signature(x="vector"),
          function(x, ...)
          {
            x <- t(t(x))
            ret <- lssvm(x, ...)
return(ret) }) setMethod("lssvm",signature(x="matrix"), function (x, y, scaled = TRUE, kernel = "rbfdot", kpar = "automatic", type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, ## prob.model = FALSE, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) { ## subsetting and na-handling for matrices ret <- new("lssvm") if (!missing(subset)) x <- x[subset,] df <- unique(na.action(data.frame(y, x))) y <- df[,1] x <- as.matrix(df[,-1]) n.action(ret) <- na.action if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type ## scaling, subsetting, and NA handling x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] } } ncols <- ncol(x) m <- nrows <- nrow(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(lssvm(as.kernelMatrix(x), y = y,type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if(is(y,"vector")) { y <- as.matrix(y) if (nrows != nrow(y)) stop("x and y don't match.") } if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (nrows != length(y)) stop("x and y don't match.") } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 if(reduced == FALSE) { K <- kernelMatrix(kernel,x) KP <- K - (1/m)*colSums(K) beta <- solve((KP%*%K + m * tau * K), KP%*%ymat) b <- colMeans(ymat) - colMeans(K%*%beta) alphaindex(ret) <- 1:m } else { G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) rep <- sort(pivots(G),index.return=TRUE)$ix G <- G[rep,] GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) 
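## Added summary comment: csi() above returned a pivoted low-rank factor G
## with K ~ G %*% t(G) on the reordered rows; GtP is t(G) with each column
## of G centered, i.e. t(G) times the centering projection I - 11'/n.
## The ridge system solved next,
##   (GtP %*% G + tau * I) a = GtP %*% y,
## yields the reduced least-squares coefficients, which are then mapped back
## onto the pivot points by solving against the transposed leading (pivot)
## block of G.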
Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[rep,,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alphaindex(ret) <- rep[1:dim(G)[2]] } alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau ## calculate class prob. ## if (prob.model& reduced== TRUE) # warning("Class Probapilities not supported for reduced model.) ## if(prob.model & reduced == FALSE) ## { ## pos <- as.vector(ymat)==1 ## neg <- as.vector(ymat)==-1 ## ones <- rep(1,dim(x)[1]) ## onesneg <- ones[pos] <- 0 ## ones <- rep(1,dim(x)[1]) ## onespos <- ones[neg] <- 0 ##Kpos <- kernelMult(kernel,x,x[pos,],rep(1,sum(pos))) ##Kneg <- kernelMult(kernel,x,x[neg,],rep(1,sum(neg))) ## Kpos <- K[,pos]%*%rep(1,sum(pos)) ## Kneg <- K[,neg]%*%rep(1,sum(neg)) ## classmeans <- c(sum( Kpos * coef(ret)[pos] * as.vector(ymat)[pos]),sum( Kneg * coef(ret)[pos] * as.vector(ymat)[pos])) ## kneg <- K%*%onesneg ## kpos <- K%*%onespos ## M <- (diag(dim(x)[1])- (1/dim(x)[1])*rep(1,dim(x)[1])%*%t(rep(1,dim(x)[1]))) ## kcentered <- M%*%solve(diag(dim(x)[1]) - tau*M%*%K%*%M)%*%M ## prob.model(ret) <- list(Kpos=Kpos, Kneg=Kneg, kcentered=kcentered, classmeans=classmeans) ## } } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() kernelf(ret) <- kernel ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x[alphaindex(ret),,drop = FALSE] ymatrix(ret) <- y nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA scaling(ret) <- list(scaled = scaled, x.scale = x.scale) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL,reduced = reduced, tau=tau, tol=tol, rank = floor(rank/cross), delta = floor(delta/cross), scaled=FALSE, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) ## kernelMatrix interface setMethod("lssvm",signature(x="kernelMatrix"), function (x, y, type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...) { ## subsetting and na-handling for matrices ret <- new("lssvm") if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type ncols <- ncol(x) m <- nrows <- nrow(x) if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if (is(y,"vector")) { y <- as.matrix(y) if (nrows != nrow(y)) stop("x and y don't match.")} if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (nrows != length(y)) stop("x and y don't match.") } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 KP <- x - (1/m)*colSums(x) beta <- solve((KP%*%x + m * tau * x), KP%*%ymat) b <- colMeans(ymat) - colMeans(x%*%beta) alphaindex(ret) <- 1:m alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank , delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) 
svindex <- alphaindex(ret)
              ## store betas in a vector
              b(ret) <- b
              ## store tau in return object
              param(ret)$tau <- tau
            }

            kcall(ret) <- match.call()
            ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon)
            xmatrix(ret) <- x
            ymatrix(ret) <- y
            kernelf(ret) <- "Kernel matrix used for training."

            nSV(ret) <- length(svindex)
            if(nSV(ret)==0)
              stop("No Support Vectors found. You may want to change your parameters")

            fitted(ret) <- if (fit) predict(ret, x) else NA

            if (fit){
              if(type(ret)=="classification")
                error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret))))
              if(type(ret)=="regression")
                error(ret) <- drop(crossprod(fitted(ret) - y)/m)
            }

            cross(ret) <- -1
            if(cross == 1)
              cat("\n","cross should be >1 no cross-validation done!","\n","\n")
            else if (cross > 1)
              {
                cerror <- 0
                suppressWarnings(vgr <- split(sample(1:m,m),1:cross))
                for(i in 1:cross)
                  {
                    cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
                    cret <- lssvm(x[cind,cind],y[cind],type = type(ret), tau=tau, rank = floor(rank/cross), delta = floor(delta/cross), cross = 0, fit = FALSE)
                    cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,svindex,drop=FALSE]))
                    cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror
                  }
                cross(ret) <- cerror
              }

            return(ret)
          })

## list interface
setMethod("lssvm",signature(x="list"),
function (x, y, scaled = TRUE, kernel = "stringdot", kpar = list(length=4, lambda = 0.5), type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(length(x)/3), delta = 40, cross = 0, fit = TRUE, ..., subset)
{
  ## subsetting and na-handling
  ret <- new("lssvm")
  if (!missing(subset)) x <- x[subset]

  if(!is.null(type))
    type(ret) <- match.arg(type,c("classification","regression"))

  if (is.null(type))
    type(ret) <- if (is.factor(y)) "classification" else "regression"
  else
    type(ret) <- type

  m <- nrows <- length(x)

  if(is.character(kernel)){
    kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot"))

    if(is.character(kpar))
      if(kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot" || kernel == "besseldot" || kernel == "anovadot" || kernel == "splinedot" || kernel == "rbfdot" || kernel == "laplacedot" )
        {
          stop("List interface supports only the stringdot kernel.")
        }
  }

  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

  if(type(ret)=="classification")
    {
      if (!is.vector(y) && !is.factor(y)) stop("y must be a vector or a factor.")
      if (nrows != length(y)) stop("x and y don't match.")

      if (is.factor(y)) {
        lev(ret) <- levels(y)
        y <- as.integer(y)
      }
      else if (is.numeric(y))
        {
          y <- as.integer(y)
          lev(ret) <- unique(y)
        }
      else
        stop("dependent variable has to be of factor or integer type for classification mode.")

      ## initialize
      nclass(ret) <- length(unique(y))
      p <- 0
      svindex <- NULL

      ## create multidimensional y matrix
      yind <- t(matrix(1:nclass(ret),nclass(ret),m))
      ymat <- matrix(0, m, nclass(ret))
      ymat[yind==y] <- 1

      if(reduced == FALSE)
        {
          K <- kernelMatrix(kernel,x)
          KP <- K - (1/m)*colSums(K)
          beta <- solve((KP%*%K + m * tau * K), KP%*%ymat)
          b <- colMeans(ymat) - colMeans(K%*%beta)
          alphaindex(ret) <- 1:m
        }
      else
        {
          G <- csi(x, ymat, rank = rank, kernel = kernel, delta = delta, tol = tol)
          GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1])
          Gtalpha <- (GtP)%*%G
          diag(Gtalpha) <- diag(Gtalpha) + tau
          Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE]
          beta <- solve(t(G[1:dim(G)[2],]), Gtalpha)
          b <- colMeans(ymat) - colMeans(G%*%Gtalpha)
          alphaindex(ret) <- pivots(G)[1:dim(G)[2]]
        }

      alpha(ret) <- beta
      ## nonzero alpha*y
      coef(ret) <- alpha(ret)
      ## store SV indexes from current problem for later use in predict
      ## save the indexes from all the SV in a vector (use unique?)
      svindex <- alphaindex(ret)
      ## store betas in a vector
      b(ret) <- b
      ## store tau in return object
      param(ret)$tau <- tau
    }

  if(type(ret)=="regression")
    {
      if (nrows != NROW(y)) stop("x and y don't match.")

      ## initialize
      p <- 0
      svindex <- NULL

      ymat <- y

      G <- csi(x, ymat, rank = rank, kernel = kernel, delta = delta, tol = tol)
      GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1])
      Gtalpha <- (GtP)%*%G
      diag(Gtalpha) <- diag(Gtalpha) + tau
      Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE]
      beta <- solve(t(G[1:dim(G)[2],]), Gtalpha)
      b <- colMeans(ymat) - colMeans(G%*%Gtalpha)

      alpha(ret) <- beta
      ## nonzero alpha*y
      coef(ret) <- alpha(ret)
      ## store SV indexes from current problem for later use in predict
      alphaindex(ret) <- pivots(G)[1:dim(G)[2]]
      ## save the indexes from all the SV in a vector (use unique?)
      svindex <- alphaindex(ret)
      ## store betas in a vector
      b(ret) <- b
      ## store tau in return object
      param(ret)$tau <- tau
    }

  kcall(ret) <- match.call()
  kernelf(ret) <- kernel
  ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon)
  xmatrix(ret) <- x[alphaindex(ret)]
  ymatrix(ret) <- y
  nSV(ret) <- length(svindex)
  if(nSV(ret)==0)
    stop("No Support Vectors found. You may want to change your parameters")

  fitted(ret) <- if (fit) predict(ret, x) else NA

  if (fit){
    if(type(ret)=="classification")
      error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret))))
    if(type(ret)=="regression")
      error(ret) <- drop(crossprod(fitted(ret) - y)/m)
  }

  cross(ret) <- -1
  if(cross == 1)
    cat("\n","cross should be >1 no cross-validation done!","\n","\n")
  else if (cross > 1)
    {
      cerror <- 0
      suppressWarnings(vgr <- split(sample(1:m,m),1:cross))
      for(i in 1:cross)
        {
          cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
          cret <- lssvm(x[cind],y[cind],type = type(ret),kernel=kernel,kpar = NULL,reduced = reduced, tau=tau, tol=tol, rank = floor(rank/cross), delta = floor(delta/cross), scaled=FALSE, cross = 0, fit = FALSE)
          cres <- predict(cret, x[vgr[[i]]])
          cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror
        }
      cross(ret) <- cerror
    }

  return(ret)
})

#**************************************************************#

setMethod("predict", signature(object = "lssvm"),
function (object, newdata, type = "response", coupler = "minpair")
{
  sc <- 0
  type <- match.arg(type,c("response","probabilities","decision"))
  if (missing(newdata) && type!="response")
    return(fitted(object))
  else if(missing(newdata))
    {
      newdata <- xmatrix(object)
      sc <- 1
    }

  ncols <- ncol(xmatrix(object))
  nrows <- nrow(xmatrix(object))
  oldco <- ncols

  if (!is.null(terms(object)))
    {
      if(!is.matrix(newdata))
        newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object))
    }
  else
    newdata <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata)

  newcols <- 0
  newnrows <- nrow(newdata)
  newncols <- ncol(newdata)
  newco <- newncols

  if (oldco != newco) stop("test vector does not match model!")
  p <- 0

  if (!is.null(scaling(object)$x.scale) && sc != 1)
    newdata[,scaling(object)$scaled] <-
      scale(newdata[,scaling(object)$scaled, drop = FALSE],
            center =
scaling(object)$x.scale$"scaled:center",
            scale = scaling(object)$x.scale$"scaled:scale")

  if(is(newdata,"kernelMatrix"))
    res <- newdata %*% coef(object) - b(object)
  else
    res <- t(t(kernelMult(kernelf(object), newdata, xmatrix(object), alpha(object))) + b(object))

  if(type == "response" && type(object)=="classification"){
    predres <- max.col(res)
    return(factor(lev(object)[predres], levels = lev(object)))
  }

  if(type == "decision" || type(object)=="regression")
    return(res)

  if(type == "probabilities" && type(object)=="classification")
    {
      res <- res - prob.model(object)$classmeans
      return(res)
    }
})

#****************************************************************************************#

setMethod("show","lssvm",
function(object){
  cat("Least Squares Support Vector Machine object of class \"lssvm\"","\n")
  cat("\n")
  cat(paste("problem type :",type(object), "\n"))
  cat(paste(" parameter : tau =",param(object)$tau, "\n"))
  cat("\n")
  show(kernelf(object))
  cat(paste("\nNumber of data points used for training :", nSV(object),"\n"))
  if(!is.null(fitted(object)))
    cat(paste("Training error :", round(error(object),6),"\n"))
  if(cross(object)!= -1)
    cat("Cross validation error :",round(cross(object),6),"\n")
})

##.partopro <- function(z,s,m){
##return(2*pi*(1/sqrt((1/z)+s^2))*exp(-(m^2)/(2*((1/z)+s^2))))
##}

kernlab/R/kcca.R0000644000176000001440000000451012105726255013152 0ustar ripleyusers## Simple kernel canonical correlation analysis
## author: alexandros karatzoglou

setGeneric("kcca",function(x, y, kernel="rbfdot", kpar=list(sigma = 0.1), gamma=0.1, ncomps = 10, ...) standardGeneric("kcca"))

setMethod("kcca", signature(x = "matrix"),
          function(x, y, kernel="rbfdot", kpar=list(sigma=0.1), gamma=0.1, ncomps=10, ...)
          {
            x <- as.matrix(x)
            y <- as.matrix(y)

            if(!(nrow(x)==nrow(y)))
              stop("Number of rows in x and y matrices must be equal")

            if(!is(kernel,"kernel"))
              {
                if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
                kernel <- do.call(kernel, kpar)
              }
            if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

            Kx <- kernelMatrix(kernel,x)
            Ky <- kernelMatrix(kernel,y)

            n <- dim(Kx)[1]
            m <- 2

            ## Generate LH
            VK <- matrix(0,n*2,n)
            VK[1:n,] <- Kx
            VK[(n+1):(2*n),] <- Ky
            LH <- tcrossprod(VK, VK)

            for (i in 1:m)
              LH[((i-1)*n+1):(i*n),((i-1)*n+1):(i*n)] <- 0

            ## Generate RH
            RH <- matrix(0,n*m,n*m)
            RH[1:n,1:n] <- (Kx + diag(rep(gamma,n)))%*%Kx + diag(rep(1e-6,n))
            RH[(n+1):(2*n),(n+1):(2*n)] <- (Ky + diag(rep(gamma,n)))%*%Ky + diag(rep(1e-6,n))
            RH <- (RH+t(RH))/2

            ei <- .gevd(LH,RH)

            ret <- new("kcca")

            kcor(ret) <- as.double(ei$gvalues[1:ncomps])
            xcoef(ret) <- matrix(as.double(ei$gvectors[1:n,1:ncomps]),n)
            ycoef(ret) <- matrix(as.double(ei$gvectors[(n+1):(2*n),1:ncomps]),n)
            ## xvar(ret) <- rotated(xpca) %*% cca$xcoef
            ## yvar(ret) <- rotated(ypca) %*% cca$ycoef
            return(ret)
          })

## .gevd computes the generalized eigenvalue
## decomposition for (a,b)
.gevd <- function(a, b=diag(nrow(a))) {
  bs <- .mfunc(b, function(x) .ginvx(sqrt(x)))
  ev <- eigen(bs%*%a%*%bs)
  return(list(gvalues=ev$values, gvectors=bs%*%ev$vectors))
}

## .mfunc is a helper to compute matrix functions
.mfunc <- function(a, fn=sqrt) {
  e <- eigen(a); y <- e$vectors; v <- e$values
  return(tcrossprod(y%*%diag(fn(v)), y))
}

## .ginvx is a helper to compute reciprocals
.ginvx <- function(x) { ifelse(x==0, 0, 1/x) }

kernlab/R/kmmd.R0000644000176000001440000002030012560371302013176 0ustar ripleyusers## calculates the kernel maximum mean discrepancy for samples from two distributions
## author: alexandros karatzoglou

setGeneric("kmmd",function(x,...)
standardGeneric("kmmd")) setMethod("kmmd", signature(x = "matrix"), function(x, y, kernel="rbfdot",kpar="automatic", alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) { x <- as.matrix(x) y <- as.matrix(y) res <- new("kmmd") if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(kmmd(x= as.kernelMatrix(x), y = y, Kxy = as.kernelMatrix(x)%*%y, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=sigest(rbind(x,y),scaled=FALSE)[2]) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") m <- dim(x)[1] n <- dim(y)[1] N <- max(m,n) M <- min(m,n) Kxx <- kernelMatrix(kernel,x) Kyy <- kernelMatrix(kernel,y) Kxy <- kernelMatrix(kernel,x,y) resmmd <- .submmd(Kxx, Kyy, Kxy, alpha) H0(res) <- (resmmd$mmd1 > resmmd$D1) Radbound(res) <- resmmd$D1 Asymbound(res) <- 0 mmdstats(res)[1] <- resmmd$mmd1 mmdstats(res)[2] <- resmmd$mmd3 if(asymptotic){ boundA <- .submmd3bound(Kxx, Kyy, Kxy, alpha, frac, ntimes, replace) AsympH0(res) <- (resmmd$mmd3 > boundA) Asymbound(res) <- boundA } kernelf(res) <- kernel return(res) }) setMethod("kmmd",signature(x="list"), function(x, y, kernel="stringdot",kpar=list(type="spectrum",length=4), alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") Kxx <- kernelMatrix(kernel,x) Kyy <- kernelMatrix(kernel,y) Kxy <- kernelMatrix(kernel,x,y) ret <- kmmd(x=Kxx,y = Kyy,Kxy=Kxy, alpha=alpha, asymptotic= asymptotic, replace = replace, ntimes = ntimes, frac= frac) kernelf(ret) <- kernel return(ret) }) setMethod("kmmd",signature(x="kernelMatrix"), function (x, y, Kxy, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...) { res <- new("kmmd") resmmd <- .submmd(x, y, Kxy, alpha) H0(res) <- (resmmd$mmd1 > resmmd$D1) Radbound(res) <- resmmd$D1 Asymbound(res) <- 0 mmdstats(res)[1] <- resmmd$mmd1 mmdstats(res)[2] <- resmmd$mmd3 if(asymptotic){ boundA <- .submmd3bound(x, y, Kxy, alpha, frac, ntimes, replace) AsympH0(res) <- (resmmd$mmd1 > boundA) Asymbound(res) <- boundA } kernelf(res) <- " Kernel matrix used as input." 
return(res) }) .submmd <- function(Kxx,Kyy, Kxy, alpha) { m <- dim(Kxx)[1] n <- dim(Kyy)[1] N <- max(m,n) M <- min(m,n) sumKxx <- sum(Kxx) if(m!=n) sumKxxM <- sum(Kxx[1:M,1:M]) else sumKxxM <- sumKxx dgxx <- diag(Kxx) sumKxxnd <- sumKxx - sum(dgxx) R <- max(dgxx) RM <- max(dgxx[1:M]) hu <- colSums(Kxx[1:M,1:M]) - dgxx[1:M] sumKyy <- sum(Kyy) if(m!=n) sumKyyM <- sum(Kyy[1:M,1:M]) else sumKyyM <- sumKyy dgyy <- diag(Kyy) sumKyynd <- sum(Kyy) - sum(dgyy) R <- max(R,dgyy) RM <- max(RM,dgyy[1:M]) # RM instead of R in original hu <- hu + colSums(Kyy[1:M,1:M]) - dgyy[1:M] sumKxy <- sum(Kxy) if (m!=n) sumKxyM <- sum(Kxy[1:M,1:M]) else sumKxyM <- sumKxy dg <- diag(Kxy) # up to M only hu <- hu - colSums(Kxy[1:M,1:M]) - colSums(t(Kxy[1:M,1:M])) + 2*dg # one sided sum mmd1 <- sqrt(max(0,sumKxx/(m*m) + sumKyy/(n*n) - 2/m/n* sumKxy)) mmd3 <- sum(hu)/M/(M-1) D1 <- 2*sqrt(RM/M)+sqrt(log(1/alpha)*4*RM/M) return(list(mmd1=mmd1,mmd3=mmd3,D1=D1)) } .submmd3bound <- function(Kxx,Kyy, Kxy, alpha, frac, ntimes, replace) { ## implements the bootstrapping approach to the MMD3 bound by shuffling ## the kernel matrix ## frac : fraction of data used for bootstrap ## ntimes : how many times MMD is to be evaluated m <- dim(Kxx)[1] n <- dim(Kyy)[1] M <- min(m,n) N <- max(m,n) poslabels <- 1:m neglabels <- (m+1):(m+n) ## bootstrap bootmmd3 <- rep(0,ntimes) for (i in 1:ntimes) { nsamples <- ceiling(frac*min(m,n)) xinds <- sample(1:m,nsamples,replace=replace) yinds <- sample(1:n,nsamples,replace=replace) newlab <- c(poslabels[xinds],neglabels[yinds]) samplenew <- sample(newlab, length(newlab), replace=FALSE) xinds <- samplenew[1:nsamples] yinds <- samplenew[(nsamples+1):length(samplenew)] newm <- length(xinds) newn <- length(yinds) newM <- min(newm,newn) ##get new kernel matrices (without concat to big matrix to save memory) xind1 <- xinds[xinds<=m] xind2 <- xinds[xinds>m]- m yind1 <- yinds[yinds<=m] yind2 <- yinds[yinds>m]-m ##Kxx (this should be implemented with kernelMult for memory efficiency) nKxx <- rbind(cbind(Kxx[xind1,xind1],Kxy[xind1,xind2]), cbind(t(Kxy[xind1,xind2]),Kyy[xind2,xind2])) dgxx <- diag(nKxx) hu <- colSums(nKxx[1:newM,1:newM]) - dgxx[1:newM] # one sided sum rm(nKxx) #Kyy nKyy <- rbind(cbind(Kxx[yind1,yind1],Kxy[yind1,yind2]), cbind(t(Kxy[yind1,yind2]), Kyy[yind2,yind2])) dgyy <- diag(nKyy) hu <- hu + colSums(nKyy[1:newM,1:newM]) - dgyy[1:newM] rm(nKyy) ## Kxy nKxy <- rbind(cbind(Kxx[yind1,xind1],Kxy[yind1,xind2]), cbind(t(Kxy[xind1,yind2]),Kyy[yind2,xind2])) dg <- diag(nKxy) hu <- hu - colSums(nKxy[1:newM,1:newM]) - colSums(t(nKxy[1:newM,1:newM])) + 2*dg rm(nKxy) ## now calculate mmd3 bootmmd3[i] <- sum(hu)/newM/(newM-1) } bootmmd3 <- sort(bootmmd3, decreasing=TRUE); aind <- floor(alpha*ntimes) ## better less than too much (-> floor); ## take threshold in between aind and the next smaller value: bound <- sum(bootmmd3[c(aind,aind+1)])/2; return(bound) } setMethod("show","kmmd", function(object){ cat("Kernel Maximum Mean Discrepancy object of class \"kmmd\"","\n","\n") show(kernelf(object)) if(is.logical(object@H0)){ cat("\n") cat("\n","H0 Hypothesis rejected : ", paste(H0(object))) cat("\n","Rademacher bound : ", paste(Radbound(object))) } cat("\n") if(Asymbound(object)!=0){ cat("\n","H0 Hypothesis rejected (based on Asymptotic bound): ", paste(AsympH0(object))) cat("\n","Asymptotic bound : ", paste(Asymbound(object))) } cat("\n","1st and 3rd order MMD Statistics : ", paste( mmdstats(object))) cat("\n") }) kernlab/R/kqr.R0000644000176000001440000002445112560414652013054 0ustar 
ripleyuserssetGeneric("kqr", function(x, ...) standardGeneric("kqr")) setMethod("kqr",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- kqr(x, y, scaled = scaled, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("kqr",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- kqr(x, ...) ret }) setMethod("kqr",signature(x="matrix"), function (x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot", kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6, fit = TRUE, cross = 0, na.action = na.omit) { if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1") ret <- new("kqr") param(ret) <- list(C = C, tau = tau) if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) tmpsc <- NULL x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. 
Cannot scale data.") )
      } else {
        xtmp <- scale(x[,scaled])
        x[,scaled] <- xtmp
        x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")]
        y <- scale(y)
        y.scale <- attributes(y)[c("scaled:center","scaled:scale")]
        y <- as.vector(y)
        tmpsc <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale)
      }
    }

    ## Set up the kernel function and its parameters
    if(is.character(kernel)){
      kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot"))

      if(is.character(kpar))
        if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot" || kernel == "besseldot" || kernel == "anovadot" || kernel == "splinedot") && kpar == "automatic" )
          {
            cat("Setting default kernel parameters","\n")
            kpar <- list()
          }
    }

    if (!is.function(kernel))
      if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel)=="laplacedot" || kernel == "laplacedot" || kernel=="rbfdot")){
        kp <- match.arg(kpar,"automatic")
        if(kp=="automatic")
          kpar <- list(sigma=mean(sigest(x,scaled=FALSE,frac=1)[c(1,3)]))
        cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n")
      }

    if(!is(kernel,"kernel"))
      {
        if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
        kernel <- do.call(kernel, kpar)
      }
    if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

    ## Set up the QP problem and call ipop
    if(!reduced)
      H <- kernelMatrix(kernel,x)
    else
      H <- csi(x, kernel = kernel, rank = rank)
    c <- -y
    A <- rep(1,m)
    b <- 0
    r <- 0
    l <- matrix(C * (tau-1),m,1)
    u <- matrix(C * tau ,m,1)

    qpsol <- ipop(c, H, A, b, l, u, r)
    alpha(ret) <- coef(ret) <- primal(qpsol)
    b(ret) <- dual(qpsol)[1]

    ## Compute training error/loss
    xmatrix(ret) <- x
    ymatrix(ret) <- y
    kernelf(ret) <- kernel
    kpar(ret) <- kpar
    type(ret) <- ("Quantile Regression")

    if (fit){
      fitted(ret) <- predict(ret, x)
      if (!is.null(scaling(ret)$y.scale))
        fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
      error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau))
    }
    else fitted(ret) <- NULL

    if(any(scaled))
      scaling(ret) <- tmpsc

    ## Cross-validation
    cross(ret) <- -1
    if(cross == 1)
      cat("\n","cross should be >1 no cross-validation done!","\n","\n")
    else if (cross > 1)
      {
        pinloss <- 0
        ramloss <- 0
        crescs <- NULL
        suppressWarnings(vgr <- split(sample(1:m,m),1:cross))
        for(i in 1:cross)
          {
            cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
            cret <- kqr(x[cind,],y[cind], tau = tau, C = C, scale = FALSE, kernel = kernel, cross = 0, fit = FALSE)
            cres <- predict(cret, x[vgr[[i]],])
            crescs <- c(crescs,cres)
          }
        if (!is.null(scaling(ret)$y.scale)){
          crescs <- crescs * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
          ysvgr <- y[unlist(vgr)] * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
        }
        else
          ysvgr <- y[unlist(vgr)]

        pinloss <- drop(pinloss(ysvgr, crescs, tau))
        ramloss <- drop(ramploss(ysvgr, crescs, tau))
        cross(ret) <- c(pinloss, ramloss)
      }

    return(ret)
  })

setMethod("kqr",signature(x="list"),
function (x, y, tau = 0.5, C = 0.1, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), fit = TRUE, cross = 0)
{
  if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1")

  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

  K <- kernelMatrix(kernel,x)

  ret <- kqr(K, y = y, tau = tau, C = C, fit = fit, cross = cross)

  kernelf(ret) <- kernel
  kpar(ret) <- kpar

  return(ret)
})
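## ---------------------------------------------------------------------
## Usage sketch (not run): fitting conditional quantiles with kqr().
## The simulated data and parameter values below are illustrative
## assumptions, not package defaults; the if(FALSE) guard keeps the
## example from executing when the package is loaded.
if(FALSE) {
  set.seed(1)
  x <- sort(runif(200, -3, 3))
  y <- sin(x) + rnorm(200, sd = 0.3)
  ## median (tau = 0.5) and upper 0.9 quantile fits
  qmed <- kqr(x, y, tau = 0.5, C = 0.1)
  qup <- kqr(x, y, tau = 0.9, C = 0.1)
  plot(x, y, cex = 0.5)
  lines(x, predict(qmed, x), col = "blue")
  lines(x, predict(qup, x), col = "red")
}
## ---------------------------------------------------------------------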
setMethod("kqr",signature(x="kernelMatrix"),
function (x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0)
{
  if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1")

  ret <- new("kqr")
  param(ret) <- list(C = C, tau = tau)

  ncols <- ncol(x)
  m <- nrows <- nrow(x)

  y <- as.vector(y)

  ## Set up the QP problem and call ipop
  H <- x
  c <- -y
  A <- rep(1,m)
  b <- 0
  r <- 0
  l <- matrix(C * (tau-1),m,1)
  u <- matrix(C * tau ,m,1)

  qpsol <- ipop(c, H, A, b, l, u, r)
  alpha(ret) <- coef(ret) <- primal(qpsol)
  b(ret) <- dual(qpsol)[1]

  ## Compute training error/loss
  ymatrix(ret) <- y
  kernelf(ret) <- "Kernel Matrix used."
  type(ret) <- ("Quantile Regression")

  if (fit){
    fitted(ret) <- predict(ret, x)
    error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau))
  }
  else fitted(ret) <- NULL

  ## Cross-validation
  cross(ret) <- -1
  if(cross == 1)
    cat("\n","cross should be >1 no cross-validation done!","\n","\n")
  else if (cross > 1)
    {
      pinloss <- 0
      ramloss <- 0
      crescs <- NULL
      suppressWarnings(vgr <- split(sample(1:m,m),1:cross))
      for(i in 1:cross)
        {
          cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
          cret <- kqr(x[cind,cind],y[cind], tau = tau, C = C, scale = FALSE, cross = 0, fit = FALSE)
          cres <- predict(cret, x[vgr[[i]],vgr[[i]]])
          crescs <- c(crescs,cres)
        }
      ysvgr <- y[unlist(vgr)]

      pinloss <- drop(pinloss(ysvgr, crescs, tau))
      ramloss <- drop(ramploss(ysvgr, crescs, tau))
      cross(ret) <- c(pinloss, ramloss)
    }

  return(ret)
})

## pinball (check) loss averaged over the sample
pinloss <- function(y,f,tau)
{
  if(is.vector(y)) m <- length(y)
  else m <- dim(y)[1]
  tmp <- y - f
  return((tau * sum(tmp*(tmp>=0)) + (tau-1) * sum(tmp * (tmp<0)))/m)
}

## ramp loss: fraction of observations at or below the fitted quantile
ramploss <- function(y,f,tau)
{
  if(is.vector(y)) m <- length(y)
  else m <- dim(y)[1]
  return(sum(y<=f)/m)
}

setMethod("predict", signature(object = "kqr"),
function (object, newdata)
{
  sc <- 0
  if (missing(newdata))
    if(!is.null(fitted(object)))
      return(fitted(object))
    else
      stop("newdata is missing and no fitted values found.")

  if(!is(newdata,"kernelMatrix")){
    ncols <- ncol(xmatrix(object))
    nrows <- nrow(xmatrix(object))
    oldco <- ncols

    if (!is.null(terms(object)))
      {
        newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object))
      }
    else
      newdata <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata)

    newcols <- 0
    newnrows <- nrow(newdata)
    newncols <- ncol(newdata)
    newco <- newncols

    if (oldco != newco) stop("test vector does not match model!")

    if (is.list(scaling(object)) && sc != 1)
      newdata[,scaling(object)$scaled] <-
        scale(newdata[,scaling(object)$scaled, drop = FALSE],
              center = scaling(object)$x.scale$"scaled:center",
              scale = scaling(object)$x.scale$"scaled:scale")

    predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) - b(object)

    if (!is.null(scaling(object)$y.scale))
      return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center")
    else
      return(predres)
  }
  else {
    return(newdata%*%alpha(object) - b(object))
  }
})

setMethod("show","kqr",
function(object){
  cat("Kernel Quantile Regression object of class \"kqr\"","\n")
  cat("\n")
  show(kernelf(object))
  cat("\n")
  cat("Regularization Cost Parameter C: ",round(param(object)[[1]],9))
  cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n"))
  if(!is.null(fitted(object)))
    cat(paste("Train error :"," pinball loss : ", round(error(object)[1],9)," ramp loss :", round(error(object)[2],9),"\n"))
  ## train error & loss
  if(cross(object)!=-1)
    cat("Cross validation error :", " pinball loss : ", round(cross(object)[1],9)," ramp loss :", round(cross(object)[2],9),"\n")
})
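## ---------------------------------------------------------------------
## Sanity check (not run) for the two loss functions above: for a
## residual r = y - f the pinball loss is tau*r when r >= 0 and
## (tau-1)*r when r < 0, averaged over the sample, while ramploss()
## reports the fraction of observations lying at or below the fit.
## The toy numbers are illustrative assumptions.
if(FALSE) {
  y <- c(1, 2, 3)
  f <- c(2, 2, 2)             ## residuals: -1, 0, 1
  pinloss(y, f, tau = 0.9)    ## (0.9*1 + 0.1*1)/3 = 1/3
  ramploss(y, f, tau = 0.9)   ## 2/3: two of the three y values are <= f
}
## ---------------------------------------------------------------------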
kernlab/R/onlearn.R0000644000176000001440000001667712560371302013722 0ustar ripleyusers## kernel based on-line learning algorithms for classification, novelty detection and regression. ## ## created 15.09.04 alexandros ## updated setGeneric("onlearn",function(obj, x, y = NULL, nu = 0.2, lambda = 1e-4) standardGeneric("onlearn")) setMethod("onlearn", signature(obj = "onlearn"), function(obj , x, y = NULL, nu = 0.2, lambda = 1e-4) { if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) buffernotfull <- TRUE else buffernotfull <- FALSE if(is.vector(x)) x <- matrix(x,,length(x)) d <- dim(x)[2] for (i in 1:dim(x)[1]) { xt <- x[i,,drop=FALSE] yt <- y[i] if(type(obj)=="novelty") { phi <- fit(obj) if(phi < 0) { alpha(obj) <- (1-lambda) * alpha(obj) if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%%buffer(obj) +1 } alpha(obj)[onstop(obj)] <- lambda xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(nu-1) } else rho(obj) <- rho(obj) + lambda*nu rho(obj) <- max(rho(obj), 0) if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, matrix(xmatrix(obj)[1:onstop(obj),],ncol=d), matrix(alpha(obj)[1:onstop(obj)],ncol=1)) - rho(obj)) else fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) - rho(obj)) } if(type(obj)=="classification") { if(is.null(pattern(obj)) && is.factor(y)) pattern(obj) <- yt if(!is.null(pattern(obj))) if(pattern(obj) == yt) yt <- 1 else yt <- -1 phi <- fit(obj) alpha(obj) <- (1-lambda) * alpha(obj) if(yt*phi < rho(obj)) { if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%%buffer(obj) +1 } alpha(obj)[onstop(obj)] <- lambda*yt b(obj) <- b(obj) + lambda*yt xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(nu-1) ## (1-nu) ?? } else rho(obj) <- rho(obj) + lambda*nu rho(obj) <- max(rho(obj), 0) if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj)[1:onstop(obj),,drop=FALSE], matrix(alpha(obj)[1:onstop(obj)],ncol=1)) + b(obj)) else fit(obj) <-drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) + b(obj)) } if(type(obj)=="regression") { alpha(obj) <- (1-lambda) * alpha(obj) phi <- fit(obj) if(abs(-phi) < rho(obj)) { if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%% buffer(obj) +1 } alpha(obj)[onstop(obj)] <- sign(yt-phi)*lambda xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(1-nu) ## (1-nu) ?? 
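                 ## NORMA-style stochastic update: all buffered coefficients were
                 ## decayed by the factor (1-lambda) above, the current point was
                 ## written into the ring buffer with coefficient sign(yt-phi)*lambda,
                 ## and the tube radius rho is adapted through the learning rate
                 ## lambda and the parameter nu.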
} else{
              rho(obj) <- rho(obj) - lambda*nu
              alpha(obj)[onstop(obj)] <- sign(yt-phi)/rho(obj)
            }

            if(onstart(obj) == 1 && onstop(obj) < buffer(obj))
              fit(obj) <- drop(kernelMult(kernelf(obj), xt, matrix(xmatrix(obj)[1:onstop(obj),],ncol=d), matrix(alpha(obj)[1:onstop(obj)],ncol=1)) + b(obj))
            else
              fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) + b(obj))
          }
        }
        return(obj)
      })

setGeneric("inlearn",function(d, kernel = "rbfdot", kpar = list(sigma=0.1), type = "novelty", buffersize = 1000) standardGeneric("inlearn"))

setMethod("inlearn", signature(d = "numeric"),
          function(d, kernel = "rbfdot", kpar = list(sigma=0.1), type = "novelty", buffersize = 1000)
          {
            obj <- new("onlearn")

            if(!is(kernel,"kernel"))
              {
                if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
                kernel <- do.call(kernel, kpar)
              }
            if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

            type(obj) <- match.arg(type,c("novelty","classification","regression"))
            xmatrix(obj) <- matrix(0,buffersize,d)
            kernelf(obj) <- kernel
            onstart(obj) <- 1
            onstop(obj) <- 1
            fit(obj) <- 0
            b(obj) <- 0
            alpha(obj) <- rep(0, buffersize)
            rho(obj) <- 0
            buffer(obj) <- buffersize
            return(obj)
          })

setMethod("show","onlearn",
          function(object){
            cat("On-line learning object of class \"onlearn\"","\n")
            cat("\n")
            cat(paste("Learning problem :", type(object), "\n"))
            cat(paste("Data dimensions :", dim(xmatrix(object))[2], "\n"))
            cat(paste("Buffersize :", buffer(object), "\n"))
            cat("\n")
            show(kernelf(object))
          })

setMethod("predict",signature(object="onlearn"),
          function(object, x)
          {
            if(is.vector(x))
              x <- matrix(x,1)

            d <- dim(xmatrix(object))[2]

            if(type(object)=="novelty")
              {
                if(onstart(object) == 1 && onstop(object) < buffer(object))
                  res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) - rho(object))
                else
                  res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) - rho(object))
              }

            if(type(object)=="classification")
              {
                if(onstart(object) == 1 && onstop(object) < buffer(object))
                  res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) + b(object))
                else
                  res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) + b(object))
              }

            if(type(object)=="regression")
              {
                if(onstart(object) == 1 && onstop(object) < buffer(object))
                  res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) + b(object))
                else
                  res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) + b(object))
              }
            return(res)
          })
kernlab/R/kha.R0000644000176000001440000001042211304023134012775 0ustar ripleyusers #Kernel Hebbian Algorithm function

setGeneric("kha",function(x, ...) standardGeneric("kha"))

setMethod("kha", signature(x = "formula"),
          function(x, data = NULL, na.action = na.omit, ...)
          {
            mt <- terms(x, data = data)
            if(attr(mt, "response") > 0) stop("response not allowed in formula")
            attr(mt, "intercept") <- 0
            cl <- match.call()
            mf <- match.call(expand.dots = FALSE)
            mf$formula <- mf$x
            mf$... <- NULL
            mf[[1]] <- as.name("model.frame")
            mf <- eval(mf, parent.frame())
            na.act <- attr(mf, "na.action")
            Terms <- attr(mf, "terms")
            x <- model.matrix(mt, mf)
            res <- kha(x, ...)
## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kha") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) setMethod("kha",signature(x="matrix"), function(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 5, eta = 0.005, th = 1e-4, maxiter = 10000, verbose = FALSE, na.action = na.omit, ...) { x <- na.action(x) x <- as.matrix(x) m <- nrow(x) ret <- new("kha") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Initialize A dual variables A <- matrix(runif(features*m),m,features)*2 - 1 AOld <- A ## compute square norm of data a <- rowSums(x^2) ## initialize the empirical sum kernel map eskm <- rep(0,m) for (i in 1:m) eskm[i] <- sum(kernelFast(kernel,x,x[i,,drop=FALSE], a)) eks <- sum(eskm) counter <- 0 step <- th + 1 Aold <- A while(step > th && counter < maxiter) { y <- rep(0, features) ot <- rep(0,m) ## Hebbian Iteration for (i in 1:m) { ## compute y output etkm <- as.vector(kernelFast(kernel,x,x[i,,drop=FALSE], a)) sum1 <- as.vector(etkm %*% A) sum2 <- as.vector(eskm%*%A)/m asum <- colSums(A) sum3 <- as.vector(eskm[i]*asum)/m sum4 <- as.vector(eks * asum)/m^2 y <- sum1 - sum2 - sum3 + sum4 ## update A yy <- y%*%t(y) yy[upper.tri(yy)] <- 0 tA <- t(A) A <- t(tA - eta * yy%*%tA) A[i,] <- A[i,] + eta * y } if (counter %% 100 == 0 ) { step = mean(abs(Aold - A)) Aold <- A if(verbose) cat("Iteration :", counter, "Converged :", step,"\n") } counter <- counter + 1 } ## Normalize in Feature space cA <- t(A) - colSums(A) Fnorm <- rep(0,features) for (j in 1:m) Fnorm <- Fnorm + colSums(t(cA[,j] * cA) * as.vector(kernelFast(kernel,x,x[j,,drop=FALSE],a))) if(any(Fnorm==0)) { warning("Normalization vector contains zeros, replacing them with ones") Fnorm[which(Fnorm==0)] <- 1 } A <- t(t(A)/sqrt(Fnorm)) pcv(ret) <- A eig(ret) <- Fnorm names(eig(ret)) <- paste("Comp.", 1:features, sep = "") eskm(ret) <- eskm kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## Project a new matrix into the feature space setMethod("predict",signature(object="kha"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x)) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else as.matrix(x) if (is.vector(x)||is.data.frame(x)) x<-as.matrix(x) if (!is.matrix(x)) stop("x must be a matrix a vector or a data frame") n <- nrow(x) m <- nrow(xmatrix(object)) A <- pcv(object) y <- matrix(0,n,dim(A)[2]) eks <- sum(eskm(object)) a <- rowSums(xmatrix(object)^2) ## Project data sum2 <- as.vector(eskm(object)%*%A)/m asum <- colSums(A) sum4 <- as.vector(eks * asum)/m^2 for (i in 1:n) { ## compute y output etkm <- as.vector(kernelFast(kernelf(object),xmatrix(object),x[i,,drop=FALSE], a)) sum1 <- as.vector(etkm %*% A) sum3 <- sum(etkm)*asum/m y[i,] <- sum1 - sum2 - sum3 + sum4 } return(y) }) kernlab/R/aobjects.R0000644000176000001440000010724112055335057014050 0ustar ripleyusers## S4 object definitions and assigment/accessor functions for the slots. 
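## Every slot defined below gets a getter and a replacement method
## following one idiom; a sketch of the pattern used throughout this
## file ("foo" and "someclass" are placeholders, not actual kernlab
## names):
##   setGeneric("foo", function(object) standardGeneric("foo"))
##   setMethod("foo", "someclass", function(object) object@foo)
##   setGeneric("foo<-", function(x, value) standardGeneric("foo<-"))
##   setReplaceMethod("foo", "someclass",
##                    function(x, value) { x@foo <- value; x })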
## ## created 10.09.03 alexandros karatzoglou ## updated 23.08.05 setClass("kernel",representation("function",kpar="list")) setClass("kernelMatrix",representation("matrix"),prototype=structure(.Data=matrix())) setClassUnion("listI", c("list","numeric","vector","integer","matrix")) setClassUnion("output", c("matrix","factor","vector","logical","numeric","list","integer","NULL")) setClassUnion("input", c("matrix","list")) setClassUnion("kfunction", c("function","character")) setClassUnion("mpinput", c("matrix","data.frame","missing")) setClassUnion("lpinput", c("list","missing")) setClassUnion("kpinput", c("kernelMatrix","missing")) setClass("vm", representation(alpha = "listI", ## since setClassUnion is not working type = "character", kernelf = "kfunction", kpar = "list", xmatrix = "input", ymatrix = "output", fitted = "output", lev = "vector", nclass = "numeric", error = "vector", cross = "vector", n.action= "ANY", terms = "ANY", kcall = "call"), contains= "VIRTUAL") #Generic Vector Machine object if(!isGeneric("type")){ if (is.function("type")) fun <- type else fun <- function(object) standardGeneric("type") setGeneric("type", fun) } setMethod("type", "vm", function(object) object@type) setGeneric("type<-", function(x, value) standardGeneric("type<-")) setReplaceMethod("type", "vm", function(x, value) { x@type <- value x }) if(!isGeneric("kernelf")){ if (is.function("kernelf")) fun <- kernelf else fun <- function(object) standardGeneric("kernelf") setGeneric("kernelf", fun) } setMethod("kernelf", "vm", function(object) object@kernelf) setGeneric("kernelf<-", function(x, value) standardGeneric("kernelf<-")) setReplaceMethod("kernelf", "vm", function(x, value) { x@kernelf <- value x }) if(!isGeneric("kpar")){ if (is.function("kpar")) fun <- kpar else fun <- function(object) standardGeneric("kpar") setGeneric("kpar", fun) } setMethod("kpar", "vm", function(object) object@kpar) setGeneric("kpar<-", function(x, value) standardGeneric("kpar<-")) setReplaceMethod("kpar", "vm", function(x, value) { x@kpar <- value x }) if(!isGeneric("kcall")){ if (is.function("kcall")) fun <- kcall else fun <- function(object) standardGeneric("kcall") setGeneric("kcall", fun) } setMethod("kcall", "vm", function(object) object@kcall) setGeneric("kcall<-", function(x, value) standardGeneric("kcall<-")) setReplaceMethod("kcall", "vm", function(x, value) { x@kcall <- value x }) setMethod("terms", "vm", function(x, ...) x@terms) setGeneric("terms<-", function(x, value) standardGeneric("terms<-")) setReplaceMethod("terms", "vm", function(x, value) { x@terms <- value x }) if(!isGeneric("xmatrix")){ if (is.function("xmatrix")) fun <- xmatrix else fun <- function(object) standardGeneric("xmatrix") setGeneric("xmatrix", fun) } setMethod("xmatrix", "vm", function(object) object@xmatrix) setGeneric("xmatrix<-", function(x, value) standardGeneric("xmatrix<-")) setReplaceMethod("xmatrix", "vm", function(x, value) { x@xmatrix <- value x }) if(!isGeneric("ymatrix")){ if (is.function("ymatrix")) fun <- ymatrix else fun <- function(object) standardGeneric("ymatrix") setGeneric("ymatrix", fun) } setMethod("ymatrix", "vm", function(object) object@ymatrix) setGeneric("ymatrix<-", function(x, value) standardGeneric("ymatrix<-")) setReplaceMethod("ymatrix", "vm", function(x, value) { x@ymatrix <- value x }) setMethod("fitted", "vm", function(object, ...) 
object@fitted) setGeneric("fitted<-", function(x, value) standardGeneric("fitted<-")) setReplaceMethod("fitted", "vm", function(x, value) { x@fitted <- value x }) if(!isGeneric("lev")){ if (is.function("lev")) fun <- lev else fun <- function(object) standardGeneric("lev") setGeneric("lev", fun) } setMethod("lev", "vm", function(object) object@lev) setGeneric("lev<-", function(x, value) standardGeneric("lev<-")) setReplaceMethod("lev", "vm", function(x, value) { x@lev <- value x }) if(!isGeneric("nclass")){ if (is.function("nclass")) fun <- nclass else fun <- function(object) standardGeneric("nclass") setGeneric("nclass", fun) } setMethod("nclass", "vm", function(object) object@nclass) setGeneric("nclass<-", function(x, value) standardGeneric("nclass<-")) setReplaceMethod("nclass", "vm", function(x, value) { x@nclass <- value x }) if(!isGeneric("alpha")){ if (is.function("alpha")) fun <- alpha else fun <- function(object) standardGeneric("alpha") setGeneric("alpha", fun) } setMethod("alpha", "vm", function(object) object@alpha) setGeneric("alpha<-", function(x, value) standardGeneric("alpha<-")) setReplaceMethod("alpha", "vm", function(x, value) { x@alpha <- value x }) if(!isGeneric("error")){ if (is.function("error")) fun <- error else fun <- function(object) standardGeneric("error") setGeneric("error", fun) } setMethod("error", "vm", function(object) object@error) setGeneric("error<-", function(x, value) standardGeneric("error<-")) setReplaceMethod("error", "vm", function(x, value) { x@error <- value x }) if(!isGeneric("cross")){ if (is.function("cross")) fun <- cross else fun <- function(object) standardGeneric("cross") setGeneric("cross", fun) } setMethod("cross", "vm", function(object) object@cross) setGeneric("cross<-", function(x, value) standardGeneric("cross<-")) setReplaceMethod("cross", "vm", function(x, value) { x@cross <- value x }) if(!isGeneric("n.action")){ if (is.function("n.action")) fun <- n.action else fun <- function(object) standardGeneric("n.action") setGeneric("n.action", fun) } setMethod("n.action", "vm", function(object) object@n.action) setGeneric("n.action<-", function(x, value) standardGeneric("n.action<-")) setReplaceMethod("n.action", "vm", function(x, value) { x@n.action <- value x }) setClass("ksvm", representation(param = "list", scaling = "ANY", coef = "ANY", alphaindex = "ANY", b = "numeric", obj = "vector", SVindex = "vector", nSV = "numeric", prior = "list", prob.model = "list" ), contains="vm") if(!isGeneric("param")){ if (is.function("param")) fun <- param else fun <- function(object) standardGeneric("param") setGeneric("param", fun) } setMethod("param", "ksvm", function(object) object@param) setGeneric("param<-", function(x, value) standardGeneric("param<-")) setReplaceMethod("param", "ksvm", function(x, value) { x@param <- value x }) if(!isGeneric("scaling")){ if (is.function("scaling")) fun <- scaling else fun <- function(object) standardGeneric("scaling") setGeneric("scaling", fun) } setMethod("scaling", "ksvm", function(object) object@scaling) setGeneric("scaling<-", function(x, value) standardGeneric("scaling<-")) setReplaceMethod("scaling", "ksvm", function(x, value) { x@scaling<- value x }) if(!isGeneric("obj")){ if (is.function("obj")) fun <- obj else fun <- function(object) standardGeneric("obj") setGeneric("obj", fun) } setMethod("obj", "ksvm", function(object) object@obj) setGeneric("obj<-", function(x, value) standardGeneric("obj<-")) setReplaceMethod("obj", "ksvm", function(x, value) { x@obj<- value x }) setMethod("coef", "ksvm", 
function(object, ...) object@coef) setGeneric("coef<-", function(x, value) standardGeneric("coef<-")) setReplaceMethod("coef", "ksvm", function(x, value) { x@coef <- value x }) if(!isGeneric("alphaindex")){ if (is.function("alphaindex")) fun <- alphaindex else fun <- function(object) standardGeneric("alphaindex") setGeneric("alphaindex", fun) } setMethod("alphaindex", "ksvm", function(object) object@alphaindex) setGeneric("alphaindex<-", function(x, value) standardGeneric("alphaindex<-")) setReplaceMethod("alphaindex", "ksvm", function(x, value) { x@alphaindex <- value x }) if(!isGeneric("b")){ if (is.function("b")) fun <- b else fun <- function(object) standardGeneric("b") setGeneric("b", fun) } setMethod("b", "ksvm", function(object) object@b) setGeneric("b<-", function(x, value) standardGeneric("b<-")) setReplaceMethod("b", "ksvm", function(x, value) { x@b <- value x }) if(!isGeneric("SVindex")){ if (is.function("SVindex")) fun <- SVindex else fun <- function(object) standardGeneric("SVindex") setGeneric("SVindex", fun) } setMethod("SVindex", "ksvm", function(object) object@SVindex) setGeneric("SVindex<-", function(x, value) standardGeneric("SVindex<-")) setReplaceMethod("SVindex", "ksvm", function(x, value) { x@SVindex <- value x }) if(!isGeneric("nSV")){ if (is.function("nSV")) fun <- nSV else fun <- function(object) standardGeneric("nSV") setGeneric("nSV", fun) } setMethod("nSV", "ksvm", function(object) object@nSV) setGeneric("nSV<-", function(x, value) standardGeneric("nSV<-")) setReplaceMethod("nSV", "ksvm", function(x, value) { x@nSV <- value x }) if(!isGeneric("prior")){ if (is.function("prior")) fun <- prior else fun <- function(object) standardGeneric("prior") setGeneric("prior", fun) } setMethod("prior", "ksvm", function(object) object@prior) setGeneric("prior<-", function(x, value) standardGeneric("prior<-")) setReplaceMethod("prior", "ksvm", function(x, value) { x@prior <- value x }) if(!isGeneric("prob.model")){ if (is.function("prob.model")) fun <- prob.model else fun <- function(object) standardGeneric("prob.model") setGeneric("prob.model", fun) } setMethod("prob.model", "ksvm", function(object) object@prob.model) setGeneric("prob.model<-", function(x, value) standardGeneric("prob.model<-")) setReplaceMethod("prob.model", "ksvm", function(x, value) { x@prob.model <- value x }) setClass("lssvm", representation(param = "list", scaling = "ANY", coef = "ANY", alphaindex = "ANY", ## prob.model = "list", b = "numeric", nSV = "numeric" ), contains="vm") ##setMethod("prob.model", "lssvm", function(object) object@prob.model) ##setGeneric("prob.model<-", function(x, value) standardGeneric("prob.model<-")) ##setReplaceMethod("prob.model", "lssvm", function(x, value) { ## x@prob.model <- value ## x ##}) setMethod("param", "lssvm", function(object) object@param) setReplaceMethod("param", "lssvm", function(x, value) { x@param <- value x }) setMethod("scaling", "lssvm", function(object) object@scaling) setReplaceMethod("scaling", "lssvm", function(x, value) { x@scaling<- value x }) setMethod("coef", "lssvm", function(object, ...) 
object@coef) setReplaceMethod("coef", "lssvm", function(x, value) { x@coef <- value x }) setMethod("alphaindex", "lssvm", function(object) object@alphaindex) setReplaceMethod("alphaindex", "lssvm", function(x, value) { x@alphaindex <- value x }) setMethod("b", "lssvm", function(object) object@b) setReplaceMethod("b", "lssvm", function(x, value) { x@b <- value x }) setMethod("nSV", "lssvm", function(object) object@nSV) setReplaceMethod("nSV", "lssvm", function(x, value) { x@nSV <- value x }) setClass("kqr", representation(param = "list", scaling = "ANY", coef = "ANY", b = "numeric" ), contains="vm") setMethod("b", "kqr", function(object) object@b) setReplaceMethod("b", "kqr", function(x, value) { x@b <- value x }) setMethod("scaling", "kqr", function(object) object@scaling) setReplaceMethod("scaling", "kqr", function(x, value) { x@scaling <- value x }) setMethod("coef", "kqr", function(object) object@coef) setReplaceMethod("coef", "kqr", function(x, value) { x@coef <- value x }) setMethod("param", "kqr", function(object) object@param) setReplaceMethod("param", "kqr", function(x, value) { x@param <- value x }) ## failed attempt to get rid of all this above ## mkaccesfun <- function(cls) #{ # snames <- slotNames(cls) ## # # for(i in 1:length(snames)) # { resF <- paste("\"",snames[i],"\"",sep="") # if(!isGeneric(snames[i])) # eval(parse(file="",text=paste("setGeneric(",resF,",function(object)","standardGeneric(",resF,")",")",sep=" "))) # setGeneric(snames[i], function(object) standardGeneric(snames[i])) # # setMethod(snames[i], cls, function(object) eval(parse(file="",text=paste("object@",snames[i],sep="")))) # resG <- paste("\"",snames[i],"<-","\"",sep="") #eval(parse(file="",text=paste("setGeneric(",resG,",function(x, value)","standardGeneric(",resG,")",")",sep=" "))) # setReplaceMethod(snames[i], cls, function(x, value) { # eval(parse(file="",text=paste("x@",snames[i],"<-value",sep=""))) # x # }) # } #} setClass("prc", representation(pcv = "matrix", eig = "vector", kernelf = "kfunction", kpar = "list", xmatrix = "input", kcall = "ANY", terms = "ANY", n.action = "ANY"),contains="VIRTUAL") #accessor functions if(!isGeneric("pcv")){ if (is.function("pcv")) fun <- pcv else fun <- function(object) standardGeneric("pcv") setGeneric("pcv", fun) } setMethod("pcv", "prc", function(object) object@pcv) setGeneric("pcv<-", function(x, value) standardGeneric("pcv<-")) setReplaceMethod("pcv", "prc", function(x, value) { x@pcv <- value x }) if(!isGeneric("eig")){ if (is.function("eig")) fun <- eig else fun <- function(object) standardGeneric("eig") setGeneric("eig", fun) } setMethod("eig", "prc", function(object) object@eig) setGeneric("eig<-", function(x, value) standardGeneric("eig<-")) setReplaceMethod("eig", "prc", function(x, value) { x@eig <- value x }) setMethod("kernelf","prc", function(object) object@kernelf) setReplaceMethod("kernelf","prc", function(x, value){ x@kernelf <- value x }) setMethod("xmatrix","prc", function(object) object@xmatrix) setReplaceMethod("xmatrix","prc", function(x, value){ x@xmatrix <- value x }) setMethod("kcall","prc", function(object) object@kcall) setReplaceMethod("kcall","prc", function(x, value){ x@kcall <- value x }) setMethod("terms","prc", function(x, ...) 
x@terms) setReplaceMethod("terms","prc", function(x, value){ x@terms <- value x }) setMethod("n.action","prc", function(object) object@n.action) setReplaceMethod("n.action","prc", function(x, value){ x@n.action <- value x }) ##kernel principal components object setClass("kpca", representation(rotated = "matrix"),contains="prc") #accessor functions if(!isGeneric("rotated")){ if (is.function("rotated")) fun <- rotated else fun <- function(object) standardGeneric("rotated") setGeneric("rotated", fun) } setMethod("rotated", "kpca", function(object) object@rotated) setGeneric("rotated<-", function(x, value) standardGeneric("rotated<-")) setReplaceMethod("rotated", "kpca", function(x, value) { x@rotated <- value x }) ## kernel maximum mean discrepancy setClass("kmmd", representation(H0="logical", AsympH0 ="logical", kernelf = "kfunction", Asymbound="numeric", Radbound="numeric", xmatrix="input", mmdstats="vector")) if(!isGeneric("mmdstats")){ if (is.function("mmdstats")) fun <- mmdstats else fun <- function(object) standardGeneric("mmdstats") setGeneric("mmdstats", fun) } setMethod("mmdstats","kmmd", function(object) object@mmdstats) setGeneric("mmdstats<-", function(x, value) standardGeneric("mmdstats<-")) setReplaceMethod("mmdstats","kmmd", function(x, value){ x@mmdstats <- value x }) if(!isGeneric("Radbound")){ if (is.function("Radbound")) fun <- Radbound else fun <- function(object) standardGeneric("Radbound") setGeneric("Radbound", fun) } setMethod("Radbound","kmmd", function(object) object@Radbound) setGeneric("Radbound<-", function(x, value) standardGeneric("Radbound<-")) setReplaceMethod("Radbound","kmmd", function(x, value){ x@Radbound <- value x }) if(!isGeneric("Asymbound")){ if (is.function("Asymbound")) fun <- Asymbound else fun <- function(object) standardGeneric("Asymbound") setGeneric("Asymbound", fun) } setMethod("Asymbound","kmmd", function(object) object@Asymbound) setGeneric("Asymbound<-", function(x, value) standardGeneric("Asymbound<-")) setReplaceMethod("Asymbound","kmmd", function(x, value){ x@Asymbound <- value x }) if(!isGeneric("H0")){ if (is.function("H0")) fun <- H0 else fun <- function(object) standardGeneric("H0") setGeneric("H0", fun) } setMethod("H0","kmmd", function(object) object@H0) setGeneric("H0<-", function(x, value) standardGeneric("H0<-")) setReplaceMethod("H0","kmmd", function(x, value){ x@H0 <- value x }) if(!isGeneric("AsympH0")){ if (is.function("AsympH0")) fun <- AsympH0 else fun <- function(object) standardGeneric("AsympH0") setGeneric("AsympH0", fun) } setMethod("AsympH0","kmmd", function(object) object@AsympH0) setGeneric("AsympH0<-", function(x, value) standardGeneric("AsympH0<-")) setReplaceMethod("AsympH0","kmmd", function(x, value){ x@AsympH0 <- value x }) setMethod("kernelf","kmmd", function(object) object@kernelf) setReplaceMethod("kernelf","kmmd", function(x, value){ x@kernelf <- value x }) setClass("ipop", representation(primal = "vector", dual = "numeric", how = "character" )) if(!isGeneric("primal")){ if (is.function("primal")) fun <- primal else fun <- function(object) standardGeneric("primal") setGeneric("primal", fun) } setMethod("primal", "ipop", function(object) object@primal) setGeneric("primal<-", function(x, value) standardGeneric("primal<-")) setReplaceMethod("primal", "ipop", function(x, value) { x@primal <- value x }) if(!isGeneric("dual")){ if (is.function("dual")) fun <- dual else fun <- function(object) standardGeneric("dual") setGeneric("dual", fun) } setMethod("dual", "ipop", function(object) object@dual) 
setGeneric("dual<-", function(x, value) standardGeneric("dual<-")) setReplaceMethod("dual", "ipop", function(x, value) { x@dual <- value x }) if(!isGeneric("how")){ if (is.function("how")) fun <- how else fun <- function(object) standardGeneric("how") setGeneric("how", fun) } setMethod("how", "ipop", function(object) object@how) setGeneric("how<-", function(x, value) standardGeneric("how<-")) setReplaceMethod("how", "ipop", function(x, value) { x@how <- value x }) # Kernel Canonical Correlation Analysis setClass("kcca", representation(kcor = "vector", xcoef = "matrix", ycoef = "matrix" ##xvar = "matrix", ##yvar = "matrix" )) if(!isGeneric("kcor")){ if (is.function("kcor")) fun <- kcor else fun <- function(object) standardGeneric("kcor") setGeneric("kcor", fun) } setMethod("kcor", "kcca", function(object) object@kcor) setGeneric("kcor<-", function(x, value) standardGeneric("kcor<-")) setReplaceMethod("kcor", "kcca", function(x, value) { x@kcor <- value x }) if(!isGeneric("xcoef")){ if (is.function("xcoef")) fun <- xcoef else fun <- function(object) standardGeneric("xcoef") setGeneric("xcoef", fun) } setMethod("xcoef", "kcca", function(object) object@xcoef) setGeneric("xcoef<-", function(x, value) standardGeneric("xcoef<-")) setReplaceMethod("xcoef", "kcca", function(x, value) { x@xcoef <- value x }) if(!isGeneric("ycoef")){ if (is.function("ycoef")) fun <- ycoef else fun <- function(object) standardGeneric("ycoef") setGeneric("ycoef", fun) } setMethod("ycoef", "kcca", function(object) object@ycoef) setGeneric("ycoef<-", function(x, value) standardGeneric("ycoef<-")) setReplaceMethod("ycoef", "kcca", function(x, value) { x@ycoef <- value x }) ##if(!isGeneric("xvar")){ ## if (is.function("xvar")) ## fun <- xvar ## else fun <- function(object) standardGeneric("xvar") ## setGeneric("xvar", fun) ##} ##setMethod("xvar", "kcca", function(object) object@xvar) ##setGeneric("xvar<-", function(x, value) standardGeneric("xvar<-")) ##setReplaceMethod("xvar", "kcca", function(x, value) { ## x@xvar <- value ## x ##}) ##if(!isGeneric("yvar")){ ## if (is.function("yvar")) ## fun <- yvar ## else fun <- function(object) standardGeneric("yvar") ## setGeneric("yvar", fun) ##} ##setMethod("yvar", "kcca", function(object) object@yvar) ##setGeneric("yvar<-", function(x, value) standardGeneric("yvar<-")) ##setReplaceMethod("yvar", "kcca", function(x, value) { ## x@yvar <- value ## x ##}) ## Gaussian Processes object setClass("gausspr",representation(tol = "numeric", scaling = "ANY", sol = "matrix", alphaindex="list", nvar = "numeric" ),contains="vm") setMethod("alphaindex","gausspr", function(object) object@alphaindex) setReplaceMethod("alphaindex","gausspr", function(x, value){ x@alphaindex <- value x }) if(!isGeneric("sol")){ if (is.function("sol")) fun <- sol else fun <- function(object) standardGeneric("sol") setGeneric("sol", fun) } setMethod("sol","gausspr", function(object) object@sol) setGeneric("sol<-", function(x, value) standardGeneric("sol<-")) setReplaceMethod("sol","gausspr", function(x, value){ x@sol <- value x }) setMethod("scaling","gausspr", function(object) object@scaling) setReplaceMethod("scaling","gausspr", function(x, value){ x@scaling <- value x }) setMethod("coef", "gausspr", function(object, ...) 
object@alpha) # Relevance Vector Machine object setClass("rvm", representation(tol = "numeric", nvar = "numeric", mlike = "numeric", RVindex = "vector", coef = "ANY", nRV = "numeric"),contains ="vm") if(!isGeneric("tol")){ if (is.function("tol")) fun <- tol else fun <- function(object) standardGeneric("tol") setGeneric("tol", fun) } setMethod("tol", "rvm", function(object) object@tol) setGeneric("tol<-", function(x, value) standardGeneric("tol<-")) setReplaceMethod("tol", "rvm", function(x, value) { x@tol <- value x }) setMethod("coef", "rvm", function(object, ...) object@coef) setReplaceMethod("coef", "rvm", function(x, value) { x@coef <- value x }) if(!isGeneric("RVindex")){ if (is.function("RVindex")) fun <- RVindex else fun <- function(object) standardGeneric("RVindex") setGeneric("RVindex", fun) } setMethod("RVindex", "rvm", function(object) object@RVindex) setGeneric("RVindex<-", function(x, value) standardGeneric("RVindex<-")) setReplaceMethod("RVindex", "rvm", function(x, value) { x@RVindex <- value x }) if(!isGeneric("nvar")){ if (is.function("nvar")) fun <- nvar else fun <- function(object) standardGeneric("nvar") setGeneric("nvar", fun) } setMethod("nvar", "rvm", function(object) object@nvar) setGeneric("nvar<-", function(x, value) standardGeneric("nvar<-")) setReplaceMethod("nvar", "rvm", function(x, value) { x@nvar <- value x }) if(!isGeneric("nRV")){ if (is.function("nRV")) fun <- nRV else fun <- function(object) standardGeneric("nRV") setGeneric("nRV", fun) } setMethod("nRV", "rvm", function(object) object@nRV) setGeneric("nRV<-", function(x, value) standardGeneric("nRV<-")) setReplaceMethod("nRV", "rvm", function(x, value) { x@nRV <- value x }) setMethod("coef", "rvm", function(object, ...) object@alpha) if(!isGeneric("mlike")){ if (is.function("mlike")) fun <- mlike else fun <- function(object) standardGeneric("mlike") setGeneric("mlike", fun) } setMethod("mlike", "rvm", function(object) object@mlike) setGeneric("mlike<-", function(x, value) standardGeneric("mlike<-")) setReplaceMethod("mlike", "rvm", function(x, value) { x@mlike <- value x }) setClass("inchol",representation("matrix", pivots="vector", diagresidues="vector", maxresiduals="vector"), prototype=structure(.Data=matrix(), pivots=vector(), diagresidues=vector(), maxresiduals=vector())) if(!isGeneric("pivots")){ if (is.function("pivots")) fun <- pivots else fun <- function(object) standardGeneric("pivots") setGeneric("pivots", fun) } setMethod("pivots", "inchol", function(object) object@pivots) setGeneric("pivots<-", function(x, value) standardGeneric("pivots<-")) setReplaceMethod("pivots", "inchol", function(x, value) { x@pivots <- value x }) if(!isGeneric("diagresidues")){ if (is.function("diagresidues")) fun <- diagresidues else fun <- function(object) standardGeneric("diagresidues") setGeneric("diagresidues", fun) } setMethod("diagresidues", "inchol", function(object) object@diagresidues) setGeneric("diagresidues<-", function(x,value) standardGeneric("diagresidues<-")) setReplaceMethod("diagresidues", "inchol", function(x, value) { x@diagresidues <- value x }) if(!isGeneric("maxresiduals")){ if (is.function("maxresiduals")) fun <- maxresiduals else fun <- function(object) standardGeneric("maxresiduals") setGeneric("maxresiduals", fun) } setMethod("maxresiduals", "inchol", function(object) object@maxresiduals) setGeneric("maxresiduals<-", function(x,value) standardGeneric("maxresiduals<-")) setReplaceMethod("maxresiduals", "inchol", function(x, value) { x@maxresiduals <- value x }) ## csi object 
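## (the csi class extends "inchol" with the low-rank factors Q and R,
## plus the true and predicted gain traces recorded during the
## decomposition; see the slots declared below)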
setClass("csi",representation(Q = "matrix", R = "matrix", truegain = "vector", predgain = "vector"),contains="inchol") if(!isGeneric("Q")){ if (is.function("Q")) fun <- Q else fun <- function(object) standardGeneric("Q") setGeneric("Q", fun) } setMethod("Q", "csi", function(object) object@Q) setGeneric("Q<-", function(x, value) standardGeneric("Q<-")) setReplaceMethod("Q", "csi", function(x, value) { x@Q <- value x }) if(!isGeneric("R")){ if (is.function("R")) fun <- R else fun <- function(object) standardGeneric("R") setGeneric("R", fun) } setMethod("R", "csi", function(object) object@R) setGeneric("R<-", function(x, value) standardGeneric("R<-")) setReplaceMethod("R", "csi", function(x, value) { x@R <- value x }) if(!isGeneric("truegain")){ if (is.function("truegain")) fun <- truegain else fun <- function(object) standardGeneric("truegain") setGeneric("truegain", fun) } setMethod("truegain", "csi", function(object) object@truegain) setGeneric("truegain<-", function(x, value) standardGeneric("truegain<-")) setReplaceMethod("truegain", "csi", function(x, value) { x@truegain <- value x }) if(!isGeneric("predgain")){ if (is.function("predgain")) fun <- predgain else fun <- function(object) standardGeneric("predgain") setGeneric("predgain", fun) } setMethod("predgain", "csi", function(object) object@predgain) setGeneric("predgain<-", function(x, value) standardGeneric("predgain<-")) setReplaceMethod("predgain", "csi", function(x, value) { x@predgain <- value x }) setClass("specc",representation("vector", centers="matrix", size="vector", kernelf="kfunction", withinss = "vector" ),prototype=structure(.Data=vector(), centers = matrix(), size=matrix(), kernelf = ls, withinss=vector())) if(!isGeneric("centers")){ if (is.function("centers")) fun <- centers else fun <- function(object) standardGeneric("centers") setGeneric("centers", fun) } setMethod("centers", "specc", function(object) object@centers) setGeneric("centers<-", function(x,value) standardGeneric("centers<-")) setReplaceMethod("centers", "specc", function(x, value) { x@centers <- value x }) if(!isGeneric("size")){ if (is.function("size")) fun <- size else fun <- function(object) standardGeneric("size") setGeneric("size", fun) } setMethod("size", "specc", function(object) object@size) setGeneric("size<-", function(x,value) standardGeneric("size<-")) setReplaceMethod("size", "specc", function(x, value) { x@size <- value x }) if(!isGeneric("withinss")){ if (is.function("withinss")) fun <- withinss else fun <- function(object) standardGeneric("withinss") setGeneric("withinss", fun) } setMethod("withinss", "specc", function(object) object@withinss) setGeneric("withinss<-", function(x,value) standardGeneric("withinss<-")) setReplaceMethod("withinss", "specc", function(x, value) { x@withinss <- value x }) setMethod("kernelf","specc", function(object) object@kernelf) setReplaceMethod("kernelf","specc", function(x, value){ x@kernelf <- value x }) setClass("ranking",representation("matrix", convergence="matrix", edgegraph="matrix"), prototype=structure(.Data=matrix(), convergence=matrix(), edgegraph=matrix())) if(!isGeneric("convergence")){ if (is.function("convergence")) fun <- convergence else fun <- function(object) standardGeneric("convergence") setGeneric("convergence", fun) } setMethod("convergence", "ranking", function(object) object@convergence) setGeneric("convergence<-", function(x,value) standardGeneric("convergence<-")) setReplaceMethod("convergence", "ranking", function(x, value) { x@convergence <- value x }) 
if(!isGeneric("edgegraph")){ if (is.function("edgegraph")) fun <- edgegraph else fun <- function(object) standardGeneric("edgegraph") setGeneric("edgegraph", fun) } setMethod("edgegraph", "ranking", function(object) object@edgegraph) setGeneric("edgegraph<-", function(x,value) standardGeneric("edgegraph<-")) setReplaceMethod("edgegraph", "ranking", function(x, value) { x@edgegraph <- value x }) ## online learning algorithms class setClass("onlearn", representation( kernelf = "kfunction", buffer = "numeric", kpar = "list", xmatrix = "matrix", fit = "numeric", onstart = "numeric", onstop = "numeric", alpha = "ANY", rho = "numeric", b = "numeric", pattern ="ANY", type="character" )) if(!isGeneric("fit")){ if (is.function("fit")) fun <- fit else fun <- function(object) standardGeneric("fit") setGeneric("fit", fun) } setMethod("fit","onlearn", function(object) object@fit) setGeneric("fit<-", function(x, value) standardGeneric("fit<-")) setReplaceMethod("fit","onlearn", function(x, value){ x@fit <- value x }) if(!isGeneric("onstart")){ if (is.function("onstart")) fun <- onstart else fun <- function(object) standardGeneric("onstart") setGeneric("onstart", fun) } setMethod("onstart", "onlearn", function(object) object@onstart) setGeneric("onstart<-", function(x, value) standardGeneric("onstart<-")) setReplaceMethod("onstart", "onlearn", function(x, value) { x@onstart <- value x }) if(!isGeneric("onstop")){ if (is.function("onstop")) fun <- onstop else fun <- function(object) standardGeneric("onstop") setGeneric("onstop", fun) } setMethod("onstop", "onlearn", function(object) object@onstop) setGeneric("onstop<-", function(x, value) standardGeneric("onstop<-")) setReplaceMethod("onstop", "onlearn", function(x, value) { x@onstop <- value x }) if(!isGeneric("buffer")){ if (is.function("buffer")) fun <- buffer else fun <- function(object) standardGeneric("buffer") setGeneric("buffer", fun) } setMethod("buffer", "onlearn", function(object) object@buffer) setGeneric("buffer<-", function(x, value) standardGeneric("buffer<-")) setReplaceMethod("buffer", "onlearn", function(x, value) { x@buffer <- value x }) setMethod("kernelf","onlearn", function(object) object@kernelf) setReplaceMethod("kernelf","onlearn", function(x, value){ x@kernelf <- value x }) setMethod("kpar","onlearn", function(object) object@kpar) setReplaceMethod("kpar","onlearn", function(x, value){ x@kpar <- value x }) setMethod("xmatrix","onlearn", function(object) object@xmatrix) setReplaceMethod("xmatrix","onlearn", function(x, value){ x@xmatrix <- value x }) setMethod("alpha","onlearn", function(object) object@alpha) setReplaceMethod("alpha","onlearn", function(x, value){ x@alpha <- value x }) setMethod("b","onlearn", function(object) object@b) setReplaceMethod("b","onlearn", function(x, value){ x@b <- value x }) setMethod("type","onlearn", function(object) object@type) setReplaceMethod("type","onlearn", function(x, value){ x@type <- value x }) if(!isGeneric("rho")){ if (is.function("rho")) fun <- rho else fun <- function(object) standardGeneric("rho") setGeneric("rho", fun) } setMethod("rho", "onlearn", function(object) object@rho) setGeneric("rho<-", function(x, value) standardGeneric("rho<-")) setReplaceMethod("rho", "onlearn", function(x, value) { x@rho <- value x }) if(!isGeneric("pattern")){ if (is.function("pattern")) fun <- pattern else fun <- function(object) standardGeneric("pattern") setGeneric("pattern", fun) } setMethod("pattern", "onlearn", function(object) object@pattern) setGeneric("pattern<-", function(x, value) 
standardGeneric("pattern<-")) setReplaceMethod("pattern", "onlearn", function(x, value) { x@pattern <- value x }) setClass("kfa",representation(alpha = "matrix", alphaindex = "vector", kernelf = "kfunction", xmatrix = "matrix", kcall = "call", terms = "ANY" )) setMethod("coef", "kfa", function(object, ...) object@alpha) setMethod("kernelf","kfa", function(object) object@kernelf) setReplaceMethod("kernelf","kfa", function(x, value){ x@kernelf <- value x }) setMethod("alphaindex","kfa", function(object) object@alphaindex) setReplaceMethod("alphaindex","kfa", function(x, value){ x@alphaindex <- value x }) setMethod("alpha","kfa", function(object) object@alpha) setReplaceMethod("alpha","kfa", function(x, value){ x@alpha <- value x }) setMethod("xmatrix","kfa", function(object) object@xmatrix) setReplaceMethod("xmatrix","kfa", function(x, value){ x@xmatrix <- value x }) setMethod("kcall","kfa", function(object) object@kcall) setReplaceMethod("kcall","kfa", function(x, value){ x@kcall <- value x }) setMethod("terms","kfa", function(x, ...) x@terms) setReplaceMethod("terms","kfa", function(x, value){ x@terms <- value x }) ## kernel hebbian algorithm object setClass("kha", representation(eskm ="vector"),contains="prc") ## accessor functions if(!isGeneric("eskm")){ if (is.function("eskm")) fun <- eskm else fun <- function(object) standardGeneric("eskm") setGeneric("eskm", fun) } setMethod("eskm", "kha", function(object) object@eskm) setGeneric("eskm<-", function(x, value) standardGeneric("eskm<-")) setReplaceMethod("eskm", "kha", function(x, value) { x@eskm <- value x }) kernlab/R/kpca.R0000644000176000001440000001214011304023134013147 0ustar ripleyusers## kpca function ## author : alexandros setGeneric("kpca",function(x, ...) standardGeneric("kpca")) setMethod("kpca", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") Terms <- attr(mf, "terms") x <- model.matrix(mt, mf) res <- kpca(x, ...) ## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kpca") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) ## Matrix Interface setMethod("kpca",signature(x="matrix"), function(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, th = 1e-4, na.action = na.omit, ...) 
{ x <- na.action(x) x <- as.matrix(x) m <- nrow(x) ret <- new("kpca") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") km <- kernelMatrix(kernel,x) ## center kernel matrix kc <- t(t(km - colSums(km)/m) - rowSums(km)/m) + sum(km)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## List Interface setMethod("kpca",signature(x="list"), function(x, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), features = 0, th = 1e-4, na.action = na.omit, ...) { x <- na.action(x) m <- length(x) ret <- new("kpca") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") km <- kernelMatrix(kernel,x) ## center kernel matrix kc <- t(t(km - colSums(km)/m) - rowSums(km)/m) + sum(km)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## Kernel Matrix Interface setMethod("kpca",signature(x= "kernelMatrix"), function(x, features = 0, th = 1e-4, ...) { ret <- new("kpca") m <- dim(x)[1] if(m!= dim(x)[2]) stop("Kernel matrix has to be symetric, and positive semidefinite") ## center kernel matrix kc <- t(t(x - colSums(x)/m) - rowSums(x)/m) + sum(x)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() xmatrix(ret) <- x kernelf(ret) <- " Kernel matrix used." 
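## kernelf is set to a plain string rather than a kernel function here: the
## predict() method below tests is.character(kernelf(object)) to detect the
## precomputed-kernel-matrix case and skip recomputing kernel values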
return(ret) }) ## project a new matrix into the feature space setMethod("predict",signature(object="kpca"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x) || !is(x,"list")) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else if (!is(x,"list")) x <- as.matrix(x) if (is.vector(x) || is.data.frame(x)) x <- as.matrix(x) if (!is.matrix(x) && !is(x,"list")) stop("x must be a matrix a vector, a data frame, or a list") if(is(x,"matrix")) { n <- nrow(x) m <- nrow(xmatrix(object))} else { n <- length(x) m <- length(xmatrix(object)) } if(is.character(kernelf(object))) { knc <- x ka <- xmatrix(object) } else { knc <- kernelMatrix(kernelf(object),x,xmatrix(object)) ka <- kernelMatrix(kernelf(object),xmatrix(object)) } ## center ret <- t(t(knc - rowSums(knc)/m) - rowSums(ka)/m) + sum(ka)/(m*n) return(ret %*% pcv(object)) }) kernlab/R/kernels.R0000644000176000001440000023705312055335057013726 0ustar ripleyusers## kernel functions ## Functions for computing a kernel value, matrix, matrix-vector ## product and quadratic form ## ## author : alexandros karatzoglou ## Define the kernel objects, ## functions with an additional slot for the kernel parameter list. ## kernel functions take two vector arguments and return a scalar (dot product) rbfdot<- function(sigma=1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") return(exp(sigma*(2*crossprod(x,y) - crossprod(x) - crossprod(y)))) # sigma/2 or sigma ?? 
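## note on the question above: 2*crossprod(x,y) - crossprod(x) - crossprod(y)
## equals -||x - y||^2, so the value returned is exp(-sigma * ||x - y||^2),
## i.e. the kernel is parameterised with sigma itself (no 1/2 factor)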
} } return(new("rbfkernel",.Data=rval,kpar=list(sigma=sigma))) } setClass("rbfkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) laplacedot<- function(sigma=1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") return(exp(-sigma*sqrt(-(round(2*crossprod(x,y) - crossprod(x) - crossprod(y),9))))) } } return(new("laplacekernel",.Data=rval,kpar=list(sigma=sigma))) } setClass("laplacekernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) besseldot<- function(sigma = 1, order = 1, degree = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") lim <- 1/(gamma(order+1)*2^(order)) bkt <- sigma*sqrt(-(2*crossprod(x,y) - crossprod(x) - crossprod(y))) if(bkt < 10e-5) res <- lim else res <- besselJ(bkt,order)*(bkt^(-order)) return((res/lim)^degree) } } return(new("besselkernel",.Data=rval,kpar=list(sigma=sigma ,order = order ,degree = degree))) } setClass("besselkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) anovadot<- function(sigma = 1, degree = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") res <- sum(exp(- sigma * (x - y)^2)) return((res)^degree) } } return(new("anovakernel",.Data=rval,kpar=list(sigma=sigma ,degree = degree))) } setClass("anovakernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) splinedot<- function() { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") minv <- pmin(x,y) res <- 1 + x*y*(1+minv) - ((x+y)/2)*minv^2 + (minv^3)/3 fres <- prod(res) return(fres) } } return(new("splinekernel",.Data=rval,kpar=list())) } setClass("splinekernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) fourierdot <- function(sigma = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") res <- (1 - sigma^2)/2*(1 - 2*sigma*cos(x - y) + sigma^2) fres <- prod(res) return(fres) } } return(new("fourierkernel",.Data=rval,kpar=list())) } setClass("fourierkernel",prototype=structure(.Data=function(){},kpar=list(sigma = 1)),contains=c("kernel")) tanhdot <- function(scale = 1, offset = 1) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ 
tanh(scale*crossprod(x)+offset) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") tanh(scale*crossprod(x,y)+offset) } } return(new("tanhkernel",.Data=rval,kpar=list(scale=scale,offset=offset))) } setClass("tanhkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) setClass("polykernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) polydot <- function(degree = 1, scale = 1, offset = 1) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ (scale*crossprod(x)+offset)^degree } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") (scale*crossprod(x,y)+offset)^degree } } return(new("polykernel",.Data=rval,kpar=list(degree=degree,scale=scale,offset=offset))) } setClass("vanillakernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) vanilladot <- function( ) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ crossprod(x) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") crossprod(x,y) } } return(new("vanillakernel",.Data=rval,kpar=list())) } setClass("stringkernel",prototype=structure(.Data=function(){},kpar=list(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE)),contains=c("kernel")) stringdot <- function(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE) { type <- match.arg(type,c("sequence","string","fullstring","exponential","constant","spectrum", "boundrange")) ## need to do this to set the length parameters if(type == "spectrum" | type == "boundrange") lambda <- length switch(type, "sequence" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call("subsequencek",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call("subsequencek",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("subsequencek",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")/sqrt(.Call("subsequencek",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")*.Call("subsequencek",as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab"))) } }, "exponential" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x 
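## the actual computation happens in C: "stringtv" scores one string against
## a vector of strings, and its sixth argument selects the variant
## (1 = constant, 2 = exponential decay, 3 = spectrum, 4 = bounded range)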
return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(2),as.double(lambda)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(2),as.double(lambda))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(nchar(x)),as.integer(nchar(x)),as.integer(2),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(nchar(y)),as.integer(nchar(y)),as.integer(2),as.double(lambda)))) } }, "constant" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(1),as.double(lambda)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(1),as.double(lambda))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(nchar(x)),as.integer(nchar(x)),as.integer(1),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(nchar(y)),as.integer(nchar(y)),as.integer(1),as.double(lambda)))) } }, "spectrum" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") n <- nchar(x) m <- nchar(y) if(n < length | m < length){ warning("String length smaller than length parameter value") return(0)} if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(n),as.integer(m),as.integer(3),as.double(length)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(n),as.integer(m),as.integer(3),as.double(length))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(n),as.integer(n),as.integer(3),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(m),as.integer(m),as.integer(3),as.double(length)))) } }, "boundrange" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(4),as.double(lambda)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) 
return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(4),as.double(lambda))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(nchar(x)),as.integer(nchar(x)),as.integer(4),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(nchar(y)),as.integer(nchar(y)),as.integer(4),as.double(lambda)))) } }, "string" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call("substringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call("substringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("substringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")/sqrt(.Call("substringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")*.Call("substringk",as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab"))) } }, "fullstring" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call("fullsubstringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call("fullsubstringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("fullsubstringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")/sqrt(.Call("fullsubstringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")*.Call("fullsubstringk",as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab"))) } }) return(new("stringkernel",.Data=rval,kpar=list(length=length, lambda =lambda, type = type, normalized = normalized))) } ## show method for kernel functions setMethod("show",signature(object="kernel"), function(object) { switch(class(object), "rbfkernel" = cat(paste("Gaussian Radial Basis kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"\n")), "laplacekernel" = cat(paste("Laplace kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"\n")), "besselkernel" = cat(paste("Bessel kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"order = ",kpar(object)$order, "degree = ", kpar(object)$degree,"\n")), 
"anovakernel" = cat(paste("Anova RBF kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma, "degree = ", kpar(object)$degree,"\n")), "tanhkernel" = cat(paste("Hyperbolic Tangent kernel function.", "\n","Hyperparameters :","scale = ", kpar(object)$scale," offset = ", kpar(object)$offset,"\n")), "polykernel" = cat(paste("Polynomial kernel function.", "\n","Hyperparameters :","degree = ",kpar(object)$degree," scale = ", kpar(object)$scale," offset = ", kpar(object)$offset,"\n")), "vanillakernel" = cat(paste("Linear (vanilla) kernel function.", "\n")), "splinekernel" = cat(paste("Spline kernel function.", "\n")), "stringkernel" = { if(kpar(object)$type =="spectrum" | kpar(object)$type =="boundrange") cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :","sub-sequence/string length = ",kpar(object)$length, "\n")) else if(kpar(object)$type =="exponential" | kpar(object)$type =="constant") cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :"," lambda = ", kpar(object)$lambda, "\n")) else cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :","sub-sequence/string length = ",kpar(object)$length," lambda = ", kpar(object)$lambda, "\n")) if(kpar(object)$normalized == TRUE) cat(" Normalized","\n") if(kpar(object)$normalized == FALSE) cat(" Not Normalized","\n")} ) }) ## create accesor function as in "S4 Classses in 15 pages more or less", well.. if (!isGeneric("kpar")){ if (is.function(kpar)) fun <- kpar else fun <- function(object) standardGeneric("kpar") setGeneric("kpar",fun) } setMethod("kpar","kernel", function(object) object@kpar) ## Functions that return usefull kernel calculations (kernel matrix etc.) ## kernelMatrix function takes two or three arguments kernelMatrix <- function(kernel, x, y=NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") n <- nrow(x) res1 <- matrix(rep(0,n*n), ncol = n) if(is.null(y)){ for(i in 1:n) { for(j in i:n) { res1[i,j] <- kernel(x[i,],x[j,]) } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(y,"matrix")){ m<-dim(y)[1] res1 <- matrix(0,dim(x)[1],dim(y)[1]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,]) } } } return(as.kernelMatrix(res1)) } setGeneric("kernelMatrix",function(kernel, x, y = NULL) standardGeneric("kernelMatrix")) kernelMatrix.rbfkernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n) res[i,]<- exp(2*sigma*(res[i,] - dota - rep(dota[i],n))) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(2*sigma*(res[,i] - dota - rep(dotb[i],n))) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="rbfkernel"),kernelMatrix.rbfkernel) kernelMatrix.laplacekernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- 
rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n) res[i,]<- exp(-sigma*sqrt(round(-2*(res[i,] - dota - rep(dota[i],n)),9))) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(-sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9))) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="laplacekernel"),kernelMatrix.laplacekernel) kernelMatrix.besselkernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma nu = kpar(kernel)$order ni = kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu+1)*2^(nu)) dota <- rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n){ xx <- sigma*sqrt(round(-2*(res[i,] - dota - rep(dota[i],n)),9)) res[i,] <- besselJ(xx,nu)*(xx^(-nu)) res[i,which(xx<10e-5)] <- lim } return(as.kernelMatrix((res/lim)^ni)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){ xx <- sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim } return(as.kernelMatrix((res/lim)^ni)) } } setMethod("kernelMatrix",signature(kernel="besselkernel"),kernelMatrix.besselkernel) kernelMatrix.anovakernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma degree = kpar(kernel)$degree n <- dim(x)[1] if (is(x,"matrix") && is.null(y)){ a <- matrix(0, dim(x)[2], n) res <- matrix(0, n ,n) for (i in 1:n) { a[rep(TRUE,dim(x)[2]), rep(TRUE,n)] <- x[i,] res[i,]<- colSums(exp( - sigma*(a - t(x))^2))^degree } return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) for( i in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,m)] <- x[i,] res[i,]<- colSums(exp( - sigma*(b - t(y))^2))^degree } return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="anovakernel"),kernelMatrix.anovakernel) kernelMatrix.polykernel <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") scale = kpar(kernel)$scale offset = kpar(kernel)$offset degree = kpar(kernel)$degree if (is(x,"matrix") && is.null(y)) { res <- (scale*crossprod(t(x))+offset)^degree return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- (scale*crossprod(t(x),t(y)) + offset)^degree return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="polykernel"),kernelMatrix.polykernel) kernelMatrix.vanilla <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- crossprod(t(x),t(y)) 
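## crossprod(t(x), t(y)) is just x %*% t(y): for the linear (vanilla) kernel
## the cross kernel matrix is the plain matrix of dot products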
return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="vanillakernel"),kernelMatrix.vanilla) kernelMatrix.tanhkernel <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if (is(x,"matrix") && is.null(y)){ scale = kpar(kernel)$scale offset = kpar(kernel)$offset res <- tanh(scale*crossprod(t(x)) + offset) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- tanh(scale*crossprod(t(x),t(y)) + offset) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="tanhkernel"),kernelMatrix.tanhkernel) kernelMatrix.splinekernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma degree = kpar(kernel)$degree n <- dim(x)[1] if (is(x,"matrix") && is.null(y)){ a <- matrix(0, dim(x)[2], n) res <- matrix(0, n ,n) x <- t(x) for (i in 1:n) { dr <- x + x[,i] dp <- x * x[,i] dm <- pmin(x,x[,i]) res[i,] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) x <- t(x) y <- t(y) for( i in 1:n) { dr <- y + x[,i] dp <- y * x[,i] dm <- pmin(y,x[,i]) res[i,] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="splinekernel"),kernelMatrix.splinekernel) kernelMatrix.stringkernel <- function(kernel, x, y=NULL) { n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) normalized = kpar(kernel)$normalized if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { resdiag <- rep(0,n) if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) ## y is null if(is.null(y)){ if(normalized == TRUE){ ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- rep(1,n) } else{ for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag } } if (!is.null(y)){ m <- length(y) res1 <- matrix(0,n,m) resdiag1 <- rep(0,m) if(normalized == TRUE){ for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } } else{ for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[[i]],y[[j]]) } } } } return(as.kernelMatrix(res1)) } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, "spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(sktype==3 &(any(nchar(x) < kpar(kernel)$length)|any(nchar(x) < kpar(kernel)$length))) stop("spectral kernel does not accept strings shorter than the length parameter") if(is(x,"list")) x <- unlist(x) if(is(y,"list")) y <- unlist(y) x <- paste(x,"\n",sep="") 
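## a trailing newline is appended before calling into C; as in the stringdot
## functions above, "\n" appears to act as the end-of-string marker expected
## by the "stringtv" routine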
if(!is.null(y)) y <- paste(y,"\n",sep="") if(is.null(y)) ret <- matrix(0, length(x),length(x)) else ret <- matrix(0,length(x),length(y)) if(is.null(y)){ for(i in 1:length(x)) ret[i,i:length(x)] <- .Call("stringtv",as.character(x[i]),as.character(x[i:length(x)]),as.integer(length(x) - i + 1),as.integer(nchar(x[i])),as.integer(nchar(x[i:length(x)])),as.integer(sktype),as.double(kpar(kernel)$lambda)) ret <- ret + t(ret) diag(ret) <- diag(ret)/2 } else for(i in 1:length(x)) ret[i,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda)) if(normalized == TRUE){ if(is.null(y)) ret <- t((1/sqrt(diag(ret)))*t(ret*(1/sqrt(diag(ret))))) else{ norm1 <- rep(0,length(x)) norm2 <- rep(0,length(y)) for( i in 1:length(x)) norm1[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) for( i in 1:length(y)) norm2[i] <- .Call("stringtv",as.character(y[i]),as.character(y[i]),as.integer(1),as.integer(nchar(y[i])),as.integer(nchar(y[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) ret <- t((1/sqrt(norm2))*t(ret*(1/sqrt(norm1)))) } } } return(as.kernelMatrix(ret)) } setMethod("kernelMatrix",signature(kernel="stringkernel"),kernelMatrix.stringkernel) ## kernelMult computes kernel matrix - vector product ## function computing * z ( %*% z) kernelMult <- function(kernel, x, y=NULL, z, blocksize = 128) { # if(is.function(kernel)) ker <- deparse(substitute(kernel)) # kernel <- do.call(kernel, kpar) if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must ba a matrix or a vector") n <- nrow(x) if(is.null(y)) { ## check if z,x match z <- as.matrix(z) if(is.null(y)&&!dim(z)[1]==n) stop("z columns/length do not match x columns") res1 <- matrix(rep(0,n*n), ncol = n) for(i in 1:n) { for(j in i:n) { res1[j,i] <- kernel(x[i,],x[j,]) } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(y,"matrix")) { m <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1] == m) stop("z has wrong dimension") res1 <- matrix(rep.int(0,m*n),ncol=m) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,]) } } } return(res1%*%z) } setGeneric("kernelMult", function(kernel, x, y=NULL, z, blocksize = 256) standardGeneric("kernelMult")) kernelMult.character <- function(kernel, x, y=NULL, z, blocksize = 256) { return(x%*%z) } setMethod("kernelMult",signature(kernel="character", x="kernelMatrix"),kernelMult.character) kernelMult.rbfkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n))))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - 
rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n))))%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2))))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z } return(res) } setMethod("kernelMult",signature(kernel="rbfkernel"),kernelMult.rbfkernel) kernelMult.laplacekernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(-sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)),9)))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(-sigma*sqrt(-round(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)),9)))%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(-sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)),9)))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(-sigma*sqrt(-round(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2)),9)))%*%z } return(res) } setMethod("kernelMult",signature(kernel="laplacekernel"),kernelMult.laplacekernel) kernelMult.besselkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma nu <- kpar(kernel)$order ni <- kpar(kernel)$degree n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 lim <- 1/(gamma(nu+1)*2^(nu)) dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize xx <- sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx<10e-5)] <- lim res[lowerl:upperl,] <- ((res1/lim)^ni)%*%z lowerl <- upperl + 1 } } if(lowerl <= n) { xx <- sigma*sqrt(-round(2*x[lowerl:n,]%*%t(x) - 
rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx<10e-5)] <- lim res[lowerl:n,] <- ((res1/lim)^ni)%*%z } } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize xx <- sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx < 10e-5)] <- lim res[lowerl:upperl,] <- ((res1/lim)^ni)%*%z lowerl <- upperl + 1 } } if(lowerl <= n) { xx <- sigma*sqrt(-round(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx < 10e-5)] <- lim res[lowerl:n,] <- ((res1/lim)^ni)%*%z } } return(res) } setMethod("kernelMult",signature(kernel="besselkernel"),kernelMult.besselkernel) kernelMult.anovakernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { a <- matrix(0,m,blocksize) re <- matrix(0, n, blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in 1:n) { a[rep(TRUE,m),rep(TRUE,blocksize)] <- x[j,] re[j,] <- colSums(exp( - sigma*(a - t(x[lowerl:upperl,]))^2))^degree } res[lowerl:upperl,] <- t(re)%*%z lowerl <- upperl + 1 } } if(lowerl <= n){ a <- matrix(0,m,n-lowerl+1) re <- matrix(0,n,n-lowerl+1) for(j in 1:n) { a[rep(TRUE,m),rep(TRUE,n-lowerl+1)] <- x[j,] re[j,] <- colSums(exp( - sigma*(a - t(x[lowerl:n,,drop=FALSE]))^2))^degree } res[lowerl:n,] <- t(re)%*%z } } if(is(y,"matrix")) { n2 <- dim(y)[1] nblocks <- floor(n2/blocksize) z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { b <- matrix(0, m, blocksize) re <- matrix(0, n, blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,blocksize)] <- x[j,] re[j,]<- colSums(exp( - sigma*(b - t(y[lowerl:upperl,]))^2)^degree) } res[,1] <- res[,1] + re %*%z[lowerl:upperl,] lowerl <- upperl + 1 } } if(lowerl <= n) { b <- matrix(0, dim(x)[2], n2-lowerl+1) re <- matrix(0, n, n2-lowerl+1) for( i in 1:n) { b[rep(TRUE,dim(x)[2]),rep(TRUE,n2-lowerl+1)] <- x[i,] re[i,]<- colSums(exp( - sigma*(b - t(y[lowerl:n2,,drop=FALSE]))^2)^degree) } res[,1] <- res[,1] + re%*%z[lowerl:n2] } } return(res) } setMethod("kernelMult",signature(kernel="anovakernel"),kernelMult.anovakernel) kernelMult.splinekernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") n <- dim(x)[1] m <- dim(x)[2] if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) 
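## same blocking scheme as the other kernelMult methods: build at most
## `blocksize` kernel rows at a time, multiply them into z straight away and
## discard them, so K %*% z is computed without ever holding the full
## kernel matrix in memory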
if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) x <- t(x) if(nblocks > 0) { re <- matrix(0, dim(z)[1], blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for (j in lowerl:upperl) { dr <- x + x[ , j] dp <- x * x[ , j] dm <- pmin(x,x[,j]) re[,j-(i-1)*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:upperl,] <- crossprod(re,z) lowerl <- upperl + 1 } } if(lowerl <= n){ a <- matrix(0,m,n-lowerl+1) re <- matrix(0,dim(z)[1],n-lowerl+1) for(j in lowerl:(n-lowerl+1)) { dr <- x + x[ , j] dp <- x * x[ , j] dm <- pmin(x,x[,j]) re[,j-nblocks*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:n,] <- crossprod(re,z) } } if(is(y,"matrix")) { n2 <- dim(y)[1] nblocks <- floor(n2/blocksize) z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) x <- t(x) y <- t(y) if(nblocks > 0) { re <- matrix(0, dim(z)[1], blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in lowerl:upperl) { dr <- y + x[ , j] dp <- y * x[ , j] dm <- pmin(y,x[,j]) re[,j-(i-1)*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:upperl] <- crossprod(re, z) lowerl <- upperl + 1 } } if(lowerl <= n) { b <- matrix(0, dim(x)[2], n-lowerl+1) re <- matrix(0, dim(z)[1], n-lowerl+1) for(j in lowerl:(n-lowerl+1)) { dr <- y + x[, j] dp <- y * x[, j] dm <- pmin(y,x[,j]) re[,j-nblocks*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:n] <- crossprod(re, z) } } return(res) } setMethod("kernelMult",signature(kernel="splinekernel"),kernelMult.splinekernel) kernelMult.polykernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) degree <- kpar(kernel)$degree scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- ((scale*x[lowerl:upperl,]%*%t(x) + offset)^degree) %*% z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- ((scale*x[lowerl:n,]%*%t(x) +offset)^degree)%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- ((scale*x[lowerl:upperl,]%*%t(y) + offset)^degree)%*%z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- ((scale*x[lowerl:n,]%*%t(y) + offset)^degree)%*%z } return(res) } setMethod("kernelMult",signature(kernel="polykernel"),kernelMult.polykernel) kernelMult.tanhkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- 
as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- tanh(scale*x[lowerl:upperl,]%*%t(x) + offset) %*% z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- tanh(scale*x[lowerl:n,]%*%t(x) +offset)%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- tanh(scale*x[lowerl:upperl,]%*%t(y) + offset)%*%z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- tanh(scale*x[lowerl:n,]%*%t(y) + offset)%*%z } return(res) } setMethod("kernelMult",signature(kernel="tanhkernel"),kernelMult.tanhkernel) kernelMult.vanillakernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") n <- dim(x)[1] m <- dim(x)[2] if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- t(crossprod(crossprod(x,z),t(x))) } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- t(crossprod(crossprod(y,z),t(x))) } return(res) } setMethod("kernelMult",signature(kernel="vanillakernel"),kernelMult.vanillakernel) kernelMult.stringkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") normalized = kpar(kernel)$normalized n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) resdiag <- rep(0,n) if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) ## y is null if(is.null(y)){ if(normalized == TRUE){ z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to x length") dz <- dim(z)[2] vres <- matrix(0,n,dz) ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- rep(1,n) vres <- res1 %*% z } else{ z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to x length") dz <- dim(z)[2] vres <- matrix(0,n,dz) ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag vres <- res1 %*% z } } if (!is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") resdiag1 <- rep(0,m) dz <- dim(z)[2] vres <- matrix(0,n,dz) for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { for(j in 1:m) { res1[i - (k-1)*blocksize,j] <- 
kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j])
            }
          }
          vres[lowerl:upperl,] <- res1 %*% z
          lowerl <- upperl + 1
        }
      }
      if(lowerl <= n) {
        res1 <- matrix(0,n-lowerl+1,m)
        for(i in lowerl:n) {
          for(j in 1:m) {
            res1[i - nblocks*blocksize,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j])
          }
        }
        vres[lowerl:n,] <- res1 %*% z
      }
    } else {
      nblocks <- floor(n/blocksize)
      lowerl <- 1
      upperl <- 0
      m <- length(y)
      z <- as.matrix(z)
      if(dim(z)[1] != m) stop("z rows must be equal to y length")
      dz <- dim(z)[2]
      vres <- matrix(0,n,dz)
      if (nblocks > 0){
        res1 <- matrix(0,blocksize,m)
        for(k in 1:nblocks){
          upperl <- upperl + blocksize
          for(i in lowerl:(upperl)) {
            for(j in 1:m) {
              res1[i - (k-1)*blocksize, j] <- kernel(x[[i]],y[[j]])
            }
          }
          vres[lowerl:upperl,] <- res1 %*% z
          lowerl <- upperl + 1
        }
      }
      if(lowerl <= n) {
        res1 <- matrix(0,n-lowerl+1,m)
        for(i in lowerl:n) {
          for(j in 1:m) {
            res1[i - nblocks*blocksize,j] <- kernel(x[[i]],y[[j]])
          }
        }
        vres[lowerl:n,] <- res1 %*% z
      }
    }
  }
} else {
  switch(kpar(kernel)$type,
         "exponential" = sktype <- 2,
         "constant" = sktype <- 1,
         "spectrum" = sktype <- 3,
         "boundrange" = sktype <- 4)
  if(sktype==3 & (any(nchar(x) < kpar(kernel)$length) | any(nchar(y) < kpar(kernel)$length)))
    stop("spectral kernel does not accept strings shorter than the length parameter")
  x <- paste(x,"\n",sep="")
  if(!is.null(y))
    y <- paste(y,"\n",sep="")
  ## y is null
  if(is.null(y)){
    if(normalized == TRUE){
      nblocks <- floor(n/blocksize)
      lowerl <- 1
      upperl <- 0
      z <- as.matrix(z)
      if(dim(z)[1] != n) stop("z rows must be equal to x length")
      dz <- dim(z)[2]
      vres <- matrix(0,n,dz)
      for (i in 1:n)
        resdiag[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda))
      if (nblocks > 0){
        res1 <- matrix(0,blocksize,n)
        for(k in 1:nblocks){
          upperl <- upperl + blocksize
          for(i in lowerl:(upperl)) {
            res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag)
          }
          vres[lowerl:upperl,] <- res1 %*% z
          lowerl <- upperl + 1
        }
      }
      if(lowerl <= n) {
        res1 <- matrix(0,n-lowerl+1,n)
        for(i in lowerl:n) {
          res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag)
        }
        vres[lowerl:n,] <- res1 %*% z
      }
    } else {
      nblocks <- floor(n/blocksize)
      lowerl <- 1
      upperl <- 0
      z <- as.matrix(z)
      if(dim(z)[1] != n) stop("z rows must be equal to x length")
      dz <- dim(z)[2]
      vres <- matrix(0,n,dz)
      if (nblocks > 0){
        res1 <- matrix(0,blocksize,n)
        for(k in 1:nblocks){
          upperl <- upperl + blocksize
          for(i in lowerl:(upperl)) {
            res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))
          }
          vres[lowerl:upperl,] <- res1 %*% z
          lowerl <- upperl + 1
        }
      }
      if(lowerl <= n) {
        res1 <- matrix(0,n-lowerl+1,n)
        for(i in lowerl:n) {
          res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))
        }
        vres[lowerl:n,] <- res1 %*% z
      }
    }
  }
  if (!is.null(y)){
    if(normalized == TRUE){
      nblocks <- floor(n/blocksize)
      lowerl <- 1
      upperl <- 0
      m <- length(y)
      z <- as.matrix(z)
      if(dim(z)[1] != m) stop("z rows must be equal to y length")
      resdiag1 <- rep(0,m)
      dz <- dim(z)[2]
      vres <- matrix(0,n,dz)
      for(i in 1:n)
        resdiag[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda))
      for(i in 1:m)
        resdiag1[i] <- .Call("stringtv",as.character(y[i]),as.character(y[i]),as.integer(1),as.integer(nchar(y[i])),as.integer(nchar(y[i])),as.integer(sktype),as.double(kpar(kernel)$lambda))
      if (nblocks > 0){
        res1 <- matrix(0,blocksize,m)
        for(k in 1:nblocks){
          upperl <- upperl + blocksize
          for(i in lowerl:(upperl)) {
            res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag1)
          }
          vres[lowerl:upperl,] <- res1 %*% z
          lowerl <- upperl + 1
        }
      }
      if(lowerl <= n) {
        res1 <- matrix(0,n-lowerl+1,m)
        for(i in lowerl:n) {
          res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag1)
        }
        vres[lowerl:n,] <- res1 %*% z
      }
    } else {
      nblocks <- floor(n/blocksize)
      lowerl <- 1
      upperl <- 0
      m <- length(y)
      z <- as.matrix(z)
      if(dim(z)[1] != m) stop("z rows must be equal to y length")
      dz <- dim(z)[2]
      vres <- matrix(0,n,dz)
      if (nblocks > 0){
        res1 <- matrix(0,blocksize,m)
        for(k in 1:nblocks){
          upperl <- upperl + blocksize
          for(i in lowerl:(upperl)) {
            res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))
          }
          vres[lowerl:upperl,] <- res1 %*% z
          lowerl <- upperl + 1
        }
      }
      if(lowerl <= n) {
        res1 <- matrix(0,n-lowerl+1,m)
        for(i in lowerl:n) {
          res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))
        }
        vres[lowerl:n,] <- res1 %*% z
      }
    }
  }
}
return(vres)
}
setMethod("kernelMult",signature(kernel="stringkernel"),kernelMult.stringkernel)

## kernelPol returns the quadratic form of a kernel matrix, i.e. the
## scalar products of x and y componentwise, multiplied by the
## polarities z and k

kernelPol <- function(kernel, x, y=NULL, z, k=NULL)
{
  if(!is(x,"matrix")) stop("x must be a matrix")
  if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix")
  if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector")
  n <- nrow(x)
  z <- as.matrix(z)
  if(!dim(z)[1]==n) stop("z must have length equal to the number of rows of x")
  res1 <- matrix(rep(0,n*n), ncol = n)
  if (is.null(y))
    {
      for(i in 1:n) {
        for(j in i:n) {
          res1[i,j] <- kernel(x[i,],x[j,])*z[j]*z[i]
        }
      }
      res1 <- res1 + t(res1)
      diag(res1) <- diag(res1)/2
    }
  if (is(x,"matrix") && is(y,"matrix")){
    m <- dim(y)[1]
    if(is.null(k)) stop("k not specified!")
    k <- as.matrix(k)
    if(!dim(x)[2]==dim(y)[2]) stop("matrices must have the same number of columns")
    if(!dim(z)[2]==dim(k)[2]) stop("z and k vectors must have the same number of columns")
    if(!dim(x)[1]==dim(z)[1]) stop("z and x must have the same number of rows")
    if(!dim(y)[1]==dim(k)[1]) stop("y and k must have the same number of rows")
    res1 <- matrix(0,dim(x)[1],dim(y)[1])
    for(i in 1:n) {
      for(j in 1:m) {
        res1[i,j] <- kernel(x[i,],y[j,])*z[i]*k[j]
      }
    }
  }
  return(res1)
}

setGeneric("kernelPol", function(kernel, x, y=NULL, z, k = NULL) standardGeneric("kernelPol"))

kernelPol.rbfkernel <-
function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix a vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) res[i,] <- z[i,]*(exp(2*sigma*(res[i,] - dota - rep(dota[i],n)))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m)#2*sigma or sigma res[,i]<- k[i,]*(exp(2*sigma*(res[,i] - dota - rep(dotb[i],n)))*z) return(res) } } setMethod("kernelPol",signature(kernel="rbfkernel"),kernelPol.rbfkernel) kernelPol.laplacekernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) n <- dim(x)[1] dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) res[i,] <- z[i,]*(exp(-sigma*sqrt(-round(2*(res[i,] - dota - rep(dota[i],n)),9)))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m)#2*sigma or sigma res[,i]<- k[i,]*(exp(-sigma*sqrt(-round(2*(res[,i] - dota - rep(dotb[i],n)),9)))*z) return(res) } } setMethod("kernelPol",signature(kernel="laplacekernel"),kernelPol.laplacekernel) kernelPol.besselkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) nu <- kpar(kernel)$order ni <- kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu + 1)*2^nu) dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) { xx <- sigma*sqrt(-round(2*(res[i,] - dota - rep(dota[i],n)),9)) res[i,] <- besselJ(xx,nu)*(xx^(-nu)) res[i,which(xx < 10e-5)] <- lim res[i,] <- z[i,]*(((res[i,]/lim)^ni)*z) } return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] if(!dim(k)[1]==m) stop("k must have equal rows to y") k <- 
as.matrix(k) if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){#2*sigma or sigma xx <- sigma*sqrt(-round(2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim res[,i]<- k[i,]*(((res[,i]/lim)^ni)*z) } return(res) } } setMethod("kernelPol",signature(kernel="besselkernel"),kernelPol.besselkernel) kernelPol.anovakernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) n <- dim(x)[1] z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") a <- matrix(0, dim(x)[2], n) res <- matrix(0,n,n) for (i in 1:n) { a[rep(TRUE,dim(x)[2]), rep(TRUE,n)] <- x[i,] res[i,]<- z[i,]*((colSums(exp( - sigma*(a - t(x))^2))^degree)*z) } return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) for( i in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,m)] <- x[i,] res[i,] <- z[i,]*((colSums(exp( - sigma*(b - t(y))^2))^degree)*k) } return(res) } } setMethod("kernelPol",signature(kernel="anovakernel"),kernelPol.anovakernel) kernelPol.splinekernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree n <- dim(x)[1] z <- as.vector(z) if(!(length(z)==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- kernelMatrix(kernel,x) return(unclass(z*t(res*z))) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal to rows of y") res <- kernelMatrix(kernel,x,y) return(unclass(k*t(res*z))) } } setMethod("kernelPol",signature(kernel="splinekernel"),kernelPol.splinekernel) kernelPol.polykernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) degree <- kpar(kernel)$degree scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(((scale*crossprod(t(x))+offset)^degree)*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal to 
rows of y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") res<- k*t(((scale*x%*%t(y) + offset)^degree)*z) return(res) } } setMethod("kernelPol",signature(kernel="polykernel"),kernelPol.polykernel) kernelPol.tanhkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(tanh(scale*crossprod(t(x))+offset)*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes x, y must have the same number of columns") res<- k*t(tanh(scale*x%*%t(y) + offset)*z) return(res) } } setMethod("kernelPol",signature(kernel="tanhkernel"),kernelPol.tanhkernel) kernelPol.vanillakernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(crossprod(t(x))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!length(k)==m) stop("k must have length equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes x, y must have the same number of columns") for( i in 1:m) res<- k*t(x%*%t(y)*z) return(res) } } setMethod("kernelPol",signature(kernel="vanillakernel"),kernelPol.vanillakernel) kernelPol.stringkernel <- function(kernel, x, y=NULL ,z ,k=NULL) { n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) resdiag <- rep(0,n) if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") normalized = kpar(kernel)$normalized if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) z <- as.matrix(z) ## y is null if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { if(is.null(y)){ if(normalized == TRUE){ ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- (z[i,]*kernel(x[[i]],x[[j]])*z[j,])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- z^2 } else { for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- (z[i,]*kernel(x[[i]],x[[j]])*z[j,]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag * z^2 } } if (!is.null(y)){ if(normalized == TRUE){ m <- length(y) res1 <- matrix(0,n,m) resdiag1 <- rep(0,m) k <- as.matrix(k) for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) for(i in 
1:n) {
          for(j in 1:m) {
            res1[i,j] <- (z[i,]*kernel(x[[i]],y[[j]])*k[j,])/sqrt(resdiag[i]*resdiag1[j])
          }
        }
      } else {
        m <- length(y)
        res1 <- matrix(0,n,m)
        k <- as.matrix(k)
        for(i in 1:n) {
          for(j in 1:m) {
            res1[i,j] <- (z[i,]*kernel(x[[i]],y[[j]])*k[j,])
          }
        }
      }
    }
  } else {
    switch(kpar(kernel)$type,
           "exponential" = sktype <- 2,
           "constant" = sktype <- 1,
           "spectrum" = sktype <- 3,
           "boundrange" = sktype <- 4)
    if(is(x,"list")) x <- unlist(x)
    if(is(y,"list")) y <- unlist(y)
    x <- paste(x,"\n",sep="")
    if(!is.null(y))
      y <- paste(y,"\n",sep="")
    if(is.null(y))
      ret <- matrix(0, length(x),length(x))
    else
      ret <- matrix(0,length(x),length(y))
    if(is.null(y)){
      for( i in 1:length(x))
        ret[i,] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))
      ## multiply the polarities z into both sides of the kernel matrix
      res1 <- outer(as.vector(z), as.vector(z)) * ret
    } else {
      for( i in 1:length(x))
        ret[i,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))
      ## row polarities z, column polarities k
      res1 <- outer(as.vector(z), as.vector(k)) * ret
    }
    if(normalized == TRUE){
      if(is.null(y)){
        ret <- t((1/sqrt(diag(ret)))*t(ret*(1/sqrt(diag(ret)))))
        res1 <- outer(as.vector(z), as.vector(z)) * ret
      } else {
        norm1 <- rep(0,length(x))
        norm2 <- rep(0,length(y))
        for( i in 1:length(x))
          norm1[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda))
        for( i in 1:length(y))
          norm2[i] <- .Call("stringtv",as.character(y[i]),as.character(y[i]),as.integer(1),as.integer(nchar(y[i])),as.integer(nchar(y[i])),as.integer(sktype),as.double(kpar(kernel)$lambda))
        ret <- t((1/sqrt(norm2))*t(ret*(1/sqrt(norm1))))
        res1 <- outer(as.vector(z), as.vector(k)) * ret
      }
    }
  }
  return(res1)
}
setMethod("kernelPol",signature(kernel="stringkernel"),kernelPol.stringkernel)

## kernelFast returns the kernel matrix; it is useful in algorithms
## which require iterative kernel matrix computations

kernelFast <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setGeneric("kernelFast",function(kernel, x, y, a) standardGeneric("kernelFast"))

kernelFast.rbfkernel <- function(kernel, x, y, a)
{
  if(is(x,"vector"))
    x <- as.matrix(x)
  if(is(y,"vector"))
    y <- as.matrix(y)
  if(!is(y,"matrix")) stop("y must be a matrix or a vector")
  sigma = kpar(kernel)$sigma
  n <- dim(x)[1]
  dota <- a/2
  if (is(x,"matrix") && is(y,"matrix")){
    if (!(dim(x)[2]==dim(y)[2]))
      stop("matrices must have the same number of columns")
    m <- dim(y)[1]
    dotb <- rowSums(y*y)/2
    res <- x%*%t(y)
    for( i in 1:m)
      res[,i] <- exp(2*sigma*(res[,i] - dota - rep(dotb[i],n)))
    return(res)
  }
}
setMethod("kernelFast",signature(kernel="rbfkernel"),kernelFast.rbfkernel)

kernelFast.laplacekernel <- function(kernel, x, y, a)
{
  if(is(x,"vector"))
    x <- as.matrix(x)
  if(is(y,"vector"))
    y <- as.matrix(y)
  if(!is(y,"matrix")) stop("y must be a matrix or a vector")
  sigma = kpar(kernel)$sigma
  n <- dim(x)[1]
  dota <- a/2
  if (is(x,"matrix") && is(y,"matrix")){
    if (!(dim(x)[2]==dim(y)[2]))
      stop("matrices must have the same number of columns")
    m <- dim(y)[1]
    dotb <- rowSums(y*y)/2
    res <- x%*%t(y)
    for( i in 1:m)
      res[,i] <- exp(-sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)))
    return(res)
  }
}
setMethod("kernelFast",signature(kernel="laplacekernel"),kernelFast.laplacekernel)

kernelFast.besselkernel <- function(kernel, x, y, a)
{
  if(is(x,"vector"))
    x <- as.matrix(x)
  if(is(y,"vector"))
    y <- as.matrix(y)
  if(!is(y,"matrix")) stop("y must be a matrix or a vector")
  sigma = kpar(kernel)$sigma
  nu = kpar(kernel)$order
  ni =
kpar(kernel)$degree
  n <- dim(x)[1]
  lim <- 1/(gamma(nu+1)*2^(nu))
  dota <- a/2
  if (is(x,"matrix") && is(y,"matrix")){
    if (!(dim(x)[2]==dim(y)[2]))
      stop("matrices must have the same number of columns")
    m <- dim(y)[1]
    dotb <- rowSums(y*y)/2
    res <- x%*%t(y)
    for( i in 1:m){
      xx <- sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9))
      res[,i] <- besselJ(xx,nu)*(xx^(-nu))
      res[which(xx<10e-5),i] <- lim
    }
    return((res/lim)^ni)
  }
}
setMethod("kernelFast",signature(kernel="besselkernel"),kernelFast.besselkernel)

kernelFast.anovakernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="anovakernel"),kernelFast.anovakernel)

kernelFast.polykernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="polykernel"),kernelFast.polykernel)

kernelFast.vanilla <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="vanillakernel"),kernelFast.vanilla)

kernelFast.tanhkernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="tanhkernel"),kernelFast.tanhkernel)

kernelFast.stringkernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="stringkernel"),kernelFast.stringkernel)

kernelFast.splinekernel <- function(kernel, x, y, a)
{
  return(kernelMatrix(kernel,x,y))
}
setMethod("kernelFast",signature(kernel="splinekernel"),kernelFast.splinekernel)
kernlab/R/ranking.R0000644000176000001440000002203011304023134013671 0ustar ripleyusers## manifold ranking
## author: alexandros

setGeneric("ranking",function(x, ...) standardGeneric("ranking"))

setMethod("ranking",signature(x="matrix"),
          function (x, y, kernel = "rbfdot", kpar = list(sigma = 1),
                    scale = FALSE, alpha = 0.99, iterations = 600,
                    edgegraph = FALSE, convergence = FALSE, ...)
{
  m <- dim(x)[1]
  d <- dim(x)[2]
  if(length(y) != m)
    {
      ym <- matrix(0,m,1)
      ym[y] <- 1
      y <- ym
    }
  if (is.null(y))
    y <- matrix(1, m, 1)
  labelled <- y != 0
  if (!any(labelled)) stop("no labels supplied")
  if(is.character(kernel))
    kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","besseldot","laplacedot"))
  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")
  if(scale) x <- scale(x)
  ## scaling from ksvm
  ## normalize ?
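  ## The RBF branch below builds the squared Euclidean distances `sed`
  ## from cached dot products via ||a - b||^2 = <a,a> + <b,b> - 2<a,b>,
  ## and then zeroes every affinity whose edge is longer than the longest
  ## edge of the minimum spanning tree of `sed`.  A quick sanity check of
  ## the identity (an illustrative sketch, not run):
  ##   a <- rnorm(5); b <- rnorm(5)
  ##   all.equal(sum((a - b)^2), sum(a*a) + sum(b*b) - 2*sum(a*b))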
  if (edgegraph && is(kernel)[1] != "rbfkernel"){
    warning("edge graph is only implemented for use with the RBF kernel")
    edgegraph <- FALSE
  }
  if (is(kernel)[1]=="rbfkernel" && edgegraph){
    sigma = kpar(kernel)$sigma
    n <- dim(x)[1]
    dota <- rowSums(x*x)/2
    sed <- crossprod(t(x))
    for (i in 1:n)
      sed[i,] <- - 2*(sed[i,] - dota - rep(dota[i],n))
    diag(sed) <- 0
    K <- exp(- sigma * sed)
    mst <- minimum.spanning.tree(sed)
    algo.mst <- mst$E
    max.squared.edge.length <- mst$max.sed.in.tree
    edgegraph <- (sed <= max.squared.edge.length)
    K[!edgegraph] <- 0
    ##algo.edge.graph <- sparse(algo.edge.graph)
    rm(sed)
    gc()
  } else {
    edgegraph <- matrix()
    K <- kernelMatrix(kernel,x)
  }
  diag(K) <- 0
  ##K <- sparse(K)
  cs <- colSums(K)
  ##cs[cs <= 10e-6] <- 1
  D <- 1/sqrt(cs)
  K <- D * K %*% diag(D)
  if(sum(labelled)==1)
    y <- K[, labelled,drop = FALSE]
  else
    y <- as.matrix(colSums(K[, labelled]))
  K <- alpha * K[, !labelled]
  ym <- matrix(0,m,iterations)
  ym[,1] <- y
  for (iteration in 2:iterations)
    ym[, iteration] <- ym[, 1] + K %*% ym[!labelled, iteration-1]
  ym[labelled,] <- NA
  r <- ym
  r[!labelled,] <- compute.ranks(-r[!labelled, ])
  if(convergence)
    convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled))
  else
    convergence <- matrix()
  res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations])
  return(new("ranking", .Data=res, convergence = convergence, edgegraph = edgegraph))
})

## kernelMatrix interface
setMethod("ranking",signature(x="kernelMatrix"),
          function (x, y, alpha = 0.99, iterations = 600, convergence = FALSE, ...)
{
  m <- dim(x)[1]
  if(length(y) != m)
    {
      ym <- matrix(0,m,1)
      ym[y] <- 1
      y <- ym
    }
  if (is.null(y))
    y <- matrix(1, m, 1)
  labelled <- y != 0
  if (!any(labelled)) stop("no labels supplied")
  diag(x) <- 0
  ##K <- sparse(K)
  cs <- colSums(x)
  ##cs[cs <= 10e-6] <- 1
  D <- 1/sqrt(cs)
  x <- D * x %*% diag(D)
  if(sum(labelled)==1)
    y <- x[, labelled,drop = FALSE]
  else
    y <- as.matrix(colSums(x[, labelled]))
  x <- alpha * x[, !labelled]
  ym <- matrix(0,m,iterations)
  ym[,1] <- y
  for (iteration in 2:iterations)
    ym[, iteration] <- ym[, 1] + x %*% ym[!labelled, iteration-1]
  ym[labelled,] <- NA
  r <- ym
  r[!labelled,] <- compute.ranks(-r[!labelled, ])
  if(convergence)
    convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled))
  else
    convergence <- matrix()
  res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations])
  return(new("ranking", .Data=res, convergence = convergence))
})

## list interface
setMethod("ranking",signature(x="list"),
          function (x, y, kernel = "stringdot",
                    kpar = list(length = 4, lambda = 0.5),
                    alpha = 0.99, iterations = 600, convergence = FALSE, ...)
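## An illustrative use of the kernelMatrix interface above (a sketch,
## not run; the data set and the index of the labelled point are
## arbitrary choices, `spirals` ships with kernlab):
##   data(spirals)
##   K <- kernelMatrix(rbfdot(sigma = 100), spirals)
##   ranked <- ranking(K, 17, alpha = 0.99, iterations = 600)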
{
  m <- length(x)
  if(length(y) != m)
    {
      ym <- matrix(0,m,1)
      ym[y] <- 1
      y <- ym
    }
  if (is.null(y))
    y <- matrix(1, m, 1)
  labelled <- y != 0
  if (!any(labelled)) stop("no labels supplied")
  if(is.character(kernel))
    kernel <- match.arg(kernel,c("stringdot","rbfdot","polydot","tanhdot","vanilladot","besseldot","laplacedot"))
  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")
  edgegraph <- matrix()
  K <- kernelMatrix(kernel,x)
  diag(K) <- 0
  ##K <- sparse(K)
  cs <- colSums(K)
  ##cs[cs <= 10e-6] <- 1
  D <- 1/sqrt(cs)
  K <- D * K %*% diag(D)
  if(sum(labelled)==1)
    y <- K[, labelled,drop = FALSE]
  else
    y <- as.matrix(colSums(K[, labelled]))
  K <- alpha * K[, !labelled]
  ym <- matrix(0,m,iterations)
  ym[,1] <- y
  for (iteration in 2:iterations)
    ym[, iteration] <- ym[, 1] + K %*% ym[!labelled, iteration-1]
  ym[labelled,] <- NA
  r <- ym
  r[!labelled,] <- compute.ranks(-r[!labelled, ])
  if(convergence)
    convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled))
  else
    convergence <- matrix()
  res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations])
  return(new("ranking", .Data=res, convergence = convergence, edgegraph = edgegraph))
})

minimum.spanning.tree <- function(sed)
{
  max.sed.in.tree <- 0
  E <- matrix(0,dim(sed)[1],dim(sed)[2])
  n <- dim(E)[1]
  C <- logical(n)
  cmp <- sed
  diag(cmp) <- NA
  ans <- min(cmp, na.rm = TRUE)
  i <- which.min(cmp)
  j <- i%/%n + 1
  i <- i%%n + 1
  for (nC in 1:n)
    {
      cmp <- sed
      cmp[C,] <- NA
      cmp[,!C] <- NA
      if(nC == 1) {
        ans <- 1
        i <- 1
      } else {
        ans <- min(cmp, na.rm=TRUE)
        i <- which.min(cmp)
      }
      j <- i%/%n + 1
      i <- i%%n + 1
      E[i, j] <- nC
      E[j, i] <- nC
      C[i] <- TRUE
      max.sed.in.tree <- max(max.sed.in.tree, sed[i, j])
    }
  ## E <- sparse(E)
  res <- list(E=E, max.sed.in.tree=max.sed.in.tree)
  return(res)
}

compute.ranks <- function(am)
{
  rm <- matrix(0,dim(am)[1],dim(am)[2])
  for (j in 1:dim(am)[2])
    {
      a <- am[, j]
      sort <- sort(a, index.return = TRUE)
      sorted <- sort$x
      r <- sort$ix
      r[r] <- 1:length(r)
      while(1) {
        if(sum(na.omit(diff(sorted) == 0)) == 0) break
        tied <- sorted[min(which(diff(sorted) == 0))]
        sorted[sorted==tied] <- NA
        r[a==tied] <- mean(r[a==tied])
      }
      rm[, j] <- r
    }
  return(rm)
}

setMethod("show","ranking",
          function(object)
          {
            cat("Ranking object of class \"ranking\"","\n")
            cat("\n")
            show(object@.Data)
            cat("\n")
            if(!any(is.na(convergence(object))))
              cat("convergence matrix included.","\n")
            if(!any(is.na(edgegraph(object))))
              cat("edgegraph matrix included.","\n")
          })
kernlab/R/specc.R0000644000176000001440000002543012560371302013344 0ustar ripleyusers## Spectral clustering
## author : alexandros

setGeneric("specc",function(x, ...) standardGeneric("specc"))

setMethod("specc", signature(x = "formula"),
          function(x, data = NULL, na.action = na.omit, ...)
          {
            mt <- terms(x, data = data)
            if(attr(mt, "response") > 0) stop("response not allowed in formula")
            attr(mt, "intercept") <- 0
            cl <- match.call()
            mf <- match.call(expand.dots = FALSE)
            mf$formula <- mf$x
            mf$... <- NULL
            mf[[1]] <- as.name("model.frame")
            mf <- eval(mf, parent.frame())
            na.act <- attr(mf, "na.action")
            x <- model.matrix(mt, mf)
            res <- specc(x, ...)
            cl[[1]] <- as.name("specc")
            if(!is.null(na.act))
              n.action(res) <- na.act
            return(res)
          })

setMethod("specc",signature(x="matrix"),
          function(x, centers, kernel = "rbfdot", kpar = "automatic",
                   nystrom.red = FALSE, nystrom.sample = dim(x)[1]/6,
                   iterations = 200, mod.sample = 0.75, na.action = na.omit, ...)
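## An illustrative call of the matrix method that follows (a sketch,
## not run; `spirals` ships with kernlab and the kernel width is then
## picked by the automatic sigma search inside the method):
##   data(spirals)
##   sc <- specc(spirals, centers = 2)
##   size(sc); withinss(sc)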
{ x <- na.action(x) rown <- rownames(x) x <- as.matrix(x) m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(is.character(kpar)) { kpar <- match.arg(kpar,c("automatic","local")) if(kpar == "automatic") { if (nystrom.red == TRUE) sam <- sample(1:m, floor(mod.sample*nystrom.sample)) else sam <- sample(1:m, floor(mod.sample*m)) sx <- unique(x[sam,]) ns <- dim(sx)[1] dota <- rowSums(sx*sx)/2 ktmp <- crossprod(t(sx)) for (i in 1:ns) ktmp[i,]<- 2*(-ktmp[i,] + dota + rep(dota[i], ns)) ## fix numerical prob. ktmp[ktmp<0] <- 0 ktmp <- sqrt(ktmp) kmax <- max(ktmp) kmin <- min(ktmp + diag(rep(Inf,dim(ktmp)[1]))) kmea <- mean(ktmp) lsmin <- log2(kmin) lsmax <- log2(kmax) midmax <- min(c(2*kmea, kmax)) midmin <- max(c(kmea/2,kmin)) rtmp <- c(seq(midmin,0.9*kmea,0.05*kmea), seq(kmea,midmax,0.08*kmea)) if ((lsmax - (Re(log2(midmax))+0.5)) < 0.5) step <- (lsmax - (Re(log2(midmax))+0.5)) else step <- 0.5 if (((Re(log2(midmin))-0.5)-lsmin) < 0.5 ) stepm <- ((Re(log2(midmin))-0.5) - lsmin) else stepm <- 0.5 tmpsig <- c(2^(seq(lsmin,(Re(log2(midmin))-0.5), stepm)), rtmp, 2^(seq(Re(log2(midmax))+0.5, lsmax,step))) diss <- matrix(rep(Inf,length(tmpsig)*nc),ncol=nc) for (i in 1:length(tmpsig)){ ka <- exp((-(ktmp^2))/(2*(tmpsig[i]^2))) diag(ka) <- 0 d <- 1/sqrt(rowSums(ka)) if(!any(d==Inf) && !any(is.na(d))&& (max(d)[1]-min(d)[1] < 10^4)) { l <- d * ka %*% diag(d) xi <- eigen(l,symmetric=TRUE)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) diss[i,] <- res$withinss } } ms <- which.min(rowSums(diss)) kernel <- rbfdot((tmpsig[ms]^(-2))/2) ## Compute Affinity Matrix if (nystrom.red == FALSE) km <- kernelMatrix(kernel, x) } if (kpar=="local") { if (nystrom.red == TRUE) stop ("Local Scaling not supported for nystrom reduction.") s <- rep(0,m) dota <- rowSums(x*x)/2 dis <- crossprod(t(x)) for (i in 1:m) dis[i,]<- 2*(-dis[i,] + dota + rep(dota[i],m)) ## fix numerical prob. 
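      ## the loop above filled `dis` with squared Euclidean distances
      ## (tiny negative values from rounding are clipped to zero below);
      ## each point then gets a local scale s_i, the median of its five
      ## smallest distances (the zero self-distance included), and the
      ## affinity becomes K_ij = exp(-d_ij^2 / (s_i * s_j)), in the
      ## spirit of self-tuning ("locally scaled") spectral clustering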
dis[dis < 0] <- 0 for (i in 1:m) s[i] <- median(sort(sqrt(dis[i,]))[1:5]) ## Compute Affinity Matrix km <- exp(-dis / s%*%t(s)) kernel <- "Localy scaled RBF kernel" } } else { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Compute Affinity Matrix if (nystrom.red == FALSE) km <- kernelMatrix(kernel, x) } if (nystrom.red == TRUE){ n <- floor(nystrom.sample) ind <- sample(1:m, m) x <- x[ind,] tmps <- sort(ind, index.return = TRUE) reind <- tmps$ix A <- kernelMatrix(kernel, x[1:n,]) B <- kernelMatrix(kernel, x[-(1:n),], x[1:n,]) d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi[reind,], centers, iterations) } else{ if(is(kernel)[1] == "rbfkernel") diag(km) <- 0 d <- 1/sqrt(rowSums(km)) l <- d * km %*% diag(d) xi <- eigen(l)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) } cent <- matrix(unlist(lapply(1:nc,ll<- function(l){colMeans(x[which(res$cluster==l), ,drop=FALSE])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:nc,ll<- function(l){sum((x[which(res$cluster==l),, drop=FALSE] - cent[l,])^2)})) names(res$cluster) <- rown return(new("specc", .Data=res$cluster, size = res$size, centers=cent, withinss=withss, kernelf= kernel)) }) setMethod("specc",signature(x="list"),function(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), nystrom.red = FALSE, nystrom.sample = length(x)/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) 
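## An illustrative string-clustering call of the list method that
## follows (a sketch, not run; `reuters` ships with kernlab, the
## string-kernel parameters are arbitrary):
##   data(reuters)
##   sc <- specc(reuters, centers = 2, kernel = "stringdot",
##               kpar = list(length = 4, lambda = 0.5))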
{ x <- na.action(x) m <- length(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (nystrom.red == TRUE){ n <- nystrom.sample ind <- sample(1:m, m) x <- x[ind,] tmps <- sort(ind, index.return = TRUE) reind <- tmps$ix A <- kernelMatrix(kernel, x[1:n,]) B <- kernelMatrix(kernel, x[-(1:n),], x[1:n,]) d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi[reind,], centers, iterations) } else{ ## Compute Affinity Matrix / in our case just the kernel matrix km <- kernelMatrix(kernel, x) if(is(kernel)[1] == "rbfkernel") diag(km) <- 0 d <- 1/sqrt(rowSums(km)) l <- d * km %*% diag(d) xi <- eigen(l)$vectors[,1:nc] sqxi <- rowSums(xi^2) if(any(sqxi==0)) stop("Zero eigenvector elements, try using a lower value for the length hyper-parameter") yi <- xi/sqrt(sqxi) res <- kmeans(yi, centers, iterations) } return(new("specc", .Data=res$cluster, size = res$size, kernelf= kernel)) }) setMethod("specc",signature(x="kernelMatrix"),function(x, centers, nystrom.red = FALSE, iterations = 200, ...) 
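## An illustrative precomputed-kernel call of the kernelMatrix method
## that follows (a sketch, not run; x is any numeric data matrix):
##   K <- kernelMatrix(rbfdot(sigma = 1), x)
##   sc <- specc(K, centers = 3)
## A rectangular kernel matrix triggers the Nystrom branch inside.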
{ m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(dim(x)[1]!=dim(x)[2]) { nystrom.red <- TRUE if(dim(x)[1] < dim(x)[2]) x <- t(x) m <- nrow(x) n <- ncol(x) } if (nystrom.red == TRUE){ A <- x[1:n,] B <- x[-(1:n),] d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi, centers, iterations) } else{ d <- 1/sqrt(rowSums(x)) l <- d * x %*% diag(d) xi <- eigen(l)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) } ## cent <- matrix(unlist(lapply(1:nc,ll<- function(l){colMeans(x[which(res$cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) ## withss <- unlist(lapply(1:nc,ll<- function(l){sum((x[which(res$cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=res$cluster, size = res$size, centers = matrix(0), withinss = c(0), kernelf= "Kernel Matrix used as input.")) }) setMethod("show","specc", function(object){ cat("Spectral Clustering object of class \"specc\"","\n") cat("\n","Cluster memberships:","\n","\n") cat(object@.Data,"\n","\n") show(kernelf(object)) cat("\n") if(!any(is.na(centers(object)))){ cat(paste("Centers: ","\n")) show(centers(object)) cat("\n")} cat(paste("Cluster size: ","\n")) show(size(object)) cat("\n") if(!is.logical(withinss(object))){ cat(paste("Within-cluster sum of squares: ", "\n")) show(withinss(object)) cat("\n")} }) .ginv <- function (X, tol = sqrt(.Machine$double.eps)) { if (length(dim(X)) > 2 || !(is.numeric(X) || is.complex(X))) stop("'X' must be a numeric or complex matrix") if (!is.matrix(X)) X <- as.matrix(X) Xsvd <- svd(X) if (is.complex(X)) Xsvd$u <- Conj(Xsvd$u) Positive <- Xsvd$d > max(tol * Xsvd$d[1], 0) if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u)) else if (!any(Positive)) array(0, dim(X)[2:1]) else Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop = FALSE])) } .sqrtm <- function(x) { tmpres <- eigen(x) V <- t(tmpres$vectors) D <- tmpres$values if(is.complex(D)) D <- Re(D) D <- pmax(D,0) return(crossprod(V*sqrt(D),V)) } kernlab/R/kfa.R0000644000176000001440000001017611654244033013014 0ustar ripleyusers ## This code takes the set x of vectors from the input space ## and does projection pursuit to find a good basis for x. ## ## The algorithm is described in Section 14.5 of ## Learning with Kernels by B. Schoelkopf and A. Smola, entitled ## Kernel Feature Analysis. ## ## created : 17.09.04 alexandros ## updated : setGeneric("kfa",function(x, ...) standardGeneric("kfa")) setMethod("kfa", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) Terms <- attr(mf, "terms") na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- kfa(x, ...) 
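    ## an illustrative call of the underlying matrix method (a sketch,
    ## not run; random data and an arbitrary kernel width):
    ##   xm <- matrix(rnorm(200), 50, 4)
    ##   f <- kfa(xm, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 2)
    ##   head(predict(f, xm))   # the data projected on the extracted features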
## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kfa") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) setMethod("kfa",signature(x="matrix"), function(x, kernel="rbfdot", kpar=list(sigma=0.1), features = 0, subset = 59, normalize = TRUE, na.action = na.omit) { if(!is.matrix(x)) stop("x must be a matrix") x <- na.action(x) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## initialize variables m <- dim(x)[1] if(subset > m) subset <- m if (features==0) features <- subset alpha <- matrix(0,subset,features) alphazero <- rep(1,subset) alphafeat <- matrix(0,features,features) idx <- -(1:subset) randomindex <- sample(1:m, subset) K <- kernelMatrix(kernel,x[randomindex,,drop=FALSE],x) ## main loop for (i in 1:features) { K.cols <- K[-idx, , drop = FALSE] if(i > 1) projections <- K.cols * (alphazero[-idx]%*%t(rep(1,m))) + crossprod(t(alpha[-idx,1:(i-1),drop=FALSE]),K[idx, ,drop = FALSE]) else projections <- K.cols * (alphazero%*%t(rep(1,m))) Q <- apply(projections, 1, sd) Q.tmp <- rep(0,subset) Q.tmp[-idx] <- Q Qidx <- which.max(Q.tmp) Qmax <- Q.tmp[Qidx] if(i > 1) alphafeat[i,1:(i-1)] <- alpha[Qidx,1:(i-1)] alphafeat[i,i] <- alphazero[Qidx] if (i > 1) idx <- c(idx,Qidx) else idx <- Qidx if (i > 1) Qfeat <- c(Qfeat, Qmax) else Qfeat <- Qmax Ksub <- K[idx, idx, drop = FALSE] alphasub <- alphafeat[i,1:i] phisquare <- alphasub %*% Ksub %*% t(t(alphasub)) dotprod <- (alphazero * (K[,idx, drop = FALSE] %*% t(t(alphasub))) + alpha[,1:i]%*%(Ksub%*%t(t(alphasub))))/drop(phisquare) alpha[,1:i] <- alpha[,1:i] - dotprod %*%alphasub if(normalize){ sumalpha <- alphazero + rowSums(abs(alpha)) alphazero <- alphazero / sumalpha alpha <- alpha/ (sumalpha %*% t(rep(1,features))) } } obj <- new("kfa") alpha(obj) <- alphafeat alphaindex(obj) <- randomindex[idx] xmatrix(obj) <- x[alphaindex(obj),] kernelf(obj) <- kernel kcall(obj) <- match.call() return(obj) }) ## project a new matrix into the feature space setMethod("predict",signature(object="kfa"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x)) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else as.matrix(x) if (!is.matrix(x)) stop("x must be a matrix a vector or a data frame") tmpres <- kernelMult(kernelf(object), x, xmatrix(object), alpha(object)) return(tmpres - matrix(colSums(tmpres)/dim(tmpres)[1],dim(tmpres)[1],dim(tmpres)[2],byrow=TRUE)) }) setMethod("show",signature(object="kfa"), function(object) { cat(paste("Number of features :",dim(alpha(object))[2],"\n")) show(kernelf(object)) }) kernlab/R/sigest.R0000644000176000001440000000465211304023134013540 0ustar ripleyusers## sigma estimation for RBF kernels ## author: alexandros setGeneric("sigest", function(x, ...) standardGeneric("sigest")) setMethod("sigest",signature(x="formula"), function (x, data=NULL, frac = 0.5, na.action = na.omit, scaled = TRUE){ call <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) ## m$... 
<- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m$frac <- NULL m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- sigest(x, scaled = scaled, frac = frac, na.action = na.action) return (ret) }) setMethod("sigest",signature(x="matrix"), function (x, frac = 0.5, scaled = TRUE, na.action = na.omit) { x <- na.action(x) if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp } } m <- dim(x)[1] n <- floor(frac*m) index <- sample(1:m, n, replace = TRUE) index2 <- sample(1:m, n, replace = TRUE) temp <- x[index,, drop=FALSE] - x[index2,,drop=FALSE] dist <- rowSums(temp^2) srange <- 1/quantile(dist[dist!=0],probs=c(0.9,0.5,0.1)) ## ds <- sort(dist[dist!=0]) ## sl <- ds[ceiling(0.2*length(ds))] ## su <- ds[ceiling(0.8*length(ds))] ## srange <- c(1/su,1/median(ds), 1/sl) ## names(srange) <- NULL return(srange) }) kernlab/R/kernelmatrix.R0000644000176000001440000000050311304023134014736 0ustar ripleyusers setGeneric("as.kernelMatrix",function(x, center = FALSE) standardGeneric("as.kernelMatrix")) setMethod("as.kernelMatrix", signature(x = "matrix"), function(x, center = FALSE) { if(center){ m <- dim(x)[1] x <- t(t(x - colSums(x)/m) - rowSums(x)/m) + sum(x)/m^2 } return(new("kernelMatrix",.Data = x)) }) kernlab/R/couplers.R0000644000176000001440000000770211304023134014075 0ustar ripleyusers## wrapper function for couplers ## author : alexandros karatzoglou couple <- function(probin, coupler = "minpair") { if(is.vector(probin)) probin <- matrix(probin,1) m <- dim(probin)[1] coupler <- match.arg(coupler, c("minpair", "pkpd", "vote", "ht")) # if(coupler == "ht") # multiprob <- sapply(1:m, function(x) do.call(coupler, list(probin[x ,], clscnt))) # else multiprob <- sapply(1:m, function(x) do.call(coupler, list(probin[x ,]))) return(t(multiprob)) } ht <- function(probin, clscnt, iter=1000) { nclass <- length(clscnt) probim <- matrix(0, nclass, nclass) for(i in 1:nclass) for(j in 1:nclass) if(j>i) { probim[i,j] <- probin[i] probim[j,i] <- 1 - probin[i] } p <- rep(1/nclass,nclass) u <- matrix((1/nclass)/((1/nclass)+(1/nclass)) ,nclass,nclass) iter <- 0 while(TRUE) { iter <- iter + 1 stoperror <- 0 for(i in 1:nclass){ num <- den <- 0 for(j in 1:nclass) { if (j!=i) { num <- num + (clscnt[i] + clscnt[j]) * probim[i,j] den <- den + (clscnt[i] + clscnt[j]) * u[i,j] } } alpha <- num/(den + 1e-308) p[i] <- p[i]*alpha stoperror <- stoperror + (alpha -1)^2 if(0) { sum <- 0 sum <- sum(p) + sum p <- p/sum for(ui in 1:nclass) for(uj in 1:nclass) u[ui, uj] <- p[ui]/(p[ui] + p[uj]) } else { for(j in 1:nclass) if (i!=j) { u[i,j] <- p[i]/(p[i] + p[j]) u[j,i] <- 1 - u[i,j] } } } if(stoperror < 1e-3) break if(iter > 400) { cat("Too many iterations: aborting", probin, iter, stoperror, p) break } } ## normalize prob. p <- p/sum(p) return(p) } minpair <- function(probin) { ## Count number of classes and construct prob. 
matrix nclass <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") probim <- matrix(0, nclass, nclass) probim[upper.tri(probim)] <- probin probim[lower.tri(probim)] <- 1 - probin sum <- colSums(probim^2) Q <- diag(sum) Q[upper.tri(Q)] <- - probin*(1 - probin) Q[lower.tri(Q)] <- - probin*(1 - probin) SQ <- matrix(0,nclass +1, nclass +1) SQ[1:(nclass+1) <= nclass, 1:(nclass+1) <= nclass] <- Q SQ[1:(nclass+1) > nclass, 1:(nclass+1) <= nclass] <- rep(1,nclass) SQ[1:(nclass+1) <= nclass, 1:(nclass+1) > nclass] <- rep(1,nclass) rhs <- rep(0,nclass+1) rhs[nclass + 1] <- 1 p <- solve(SQ,rhs) p <- p[-(nclass+1)]/sum(p[-(nclass+1)]) return(p) } pkpd <- function(probin) { ## Count number of classes and constuct prob. matrix nclass <- k <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") probim <- matrix(0, nclass, nclass) probim[upper.tri(probim)] <- probin probim[lower.tri(probim)] <- 1 - probin probim[probim==0] <- 1e-300 R <- 1/probim diag(R) <- 0 p <- 1/(rowSums(R) - (k-2)) p <- p/sum(p) return(p) } vote<- function(probin) { nclass <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") votev <- rep(0,nclass) p <- 0 for(i in 1:(nclass-1)) { jj <- i+1 for(j in jj:nclass) { p <- p+1 votev[i][probin[i] >= 0.5] <- votev[i][probin[i] >= 0.5] + 1 votev[j][probin[j] < 0.5] <- votev[j][probin[j] < 0.5] + 1 } } p <- votev/sum(votev) return(p) } kernlab/R/ksvm.R0000644000176000001440000035057012560371302013235 0ustar ripleyusers## Support Vector Machines ## author : alexandros karatzoglou ## updated : 08.02.06 setGeneric("ksvm", function(x, ...) standardGeneric("ksvm")) setMethod("ksvm",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 ## no intercept x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- ksvm(x, y, scaled = scaled, ...) kcall(ret) <- cl attr(Terms,"intercept") <- 0 ## no intercept terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("ksvm",signature(x="vector"), function(x, ...) { x <- t(t(x)) ret <- ksvm(x, ...) return(ret) }) setMethod("ksvm",signature(x="matrix"), function (x, y = NULL, scaled = TRUE, type = NULL, kernel = "rbfdot", kpar = "automatic", C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ... ,subset ,na.action = na.omit) { ## Comment out sparse code, future impl. 
will be based on "Matrix" ## sparse <- inherits(x, "matrix.csr") ## if (sparse) { ## if (!require(SparseM)) ## stop("Need SparseM package for handling of sparse structures!") ## } sparse <- FALSE if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(ksvm(as.kernelMatrix(x), y = y, type = type, C = C, nu = nu, epsilon = epsilon, prob.model = prob.model, class.weights = class.weights, cross = cross, fit = fit, cache = cache, tol = tol, shrinking = shrinking, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } ## subsetting and na-handling for matrices ret <- new("ksvm") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } n.action(ret) <- na.action if (is.null(type)) type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr" if(!is.null(type)) type(ret) <- match.arg(type,c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc", "one-svc", "eps-svr", "eps-bsvr", "nu-svr")) ## ## scaling, subsetting, and NA handling ## if (sparse) { ## scale <- rep(FALSE, ncol(x)) ## if(!is.null(y)) na.fail(y) ## x <- t(t(x)) ## make shure that col-indices are sorted ## } x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. 
Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] if (is.numeric(y)&&(type(ret)!="C-svc"&&type(ret)!="nu-svc"&&type(ret)!="C-bsvc"&&type(ret)!="spoc-svc"&&type(ret)!="kbb-svc")) { y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) } } } ncols <- ncol(x) m <- nrows <- nrow(x) if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) #cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (!is(y,"vector") && !is.factor (y) & is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.") if(!(type(ret)=="one-svc")) if(is(y,"vector") | is(y,"factor") ) ym <- length(y) else if(is(y,"matrix")) ym <- dim(y)[1] else stop("y must be a matrix or a vector") if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.") if(nu > 1|| nu <0) stop("nu must be between 0 an 1.") weightlabels <- NULL nweights <- 0 weight <- 0 wl <- 0 ## in case of classification: transform factors into integers if (type(ret) == "one-svc") # one class classification --> set dummy y <- 1 else if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (!is.null(class.weights)) { weightlabels <- match (names(class.weights),lev(ret)) if (any(is.na(weightlabels))) stop ("At least one level name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize nclass(ret) <- length (unique(y)) p <- 0 K <- 0 svindex <- problem <- NULL sigma <- 0.1 degree <- offset <- scale <- 1 switch(is(kernel)[1], "rbfkernel" = { sigma <- kpar(kernel)$sigma ktype <- 2 }, "tanhkernel" = { sigma <- kpar(kernel)$scale offset <- kpar(kernel)$offset ktype <- 3 }, "polykernel" = { degree <- kpar(kernel)$degree sigma <- kpar(kernel)$scale offset <- kpar(kernel)$offset ktype <- 1 }, "vanillakernel" = { ktype <- 0 }, "laplacekernel" = { ktype <- 5 sigma <- kpar(kernel)$sigma }, "besselkernel" = { ktype <- 6 sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$order offset <- kpar(kernel)$degree }, "anovakernel" = { ktype <- 7 sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree }, "splinekernel" = { ktype <- 8 }, { ktype <- 4 } ) prior(ret) <- list(NULL) ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ## prepare the data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) 
nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) if(ktype==4) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]) resv <- .Call("smo_optim", as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])), as.integer(li+lj), as.integer(ncol(x)), as.double(yd), as.double(K), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha svind <- tmpres > 0 alpha(ret)[p] <- list(tmpres[svind]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop=FALSE][svind, ,drop=FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in a vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) if(ktype==4) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]) resv <- .Call("smo_optim", as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])), as.integer(li+lj), as.integer(ncol(x)), as.double(yd), as.double(K), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. 
as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] svind <- tmpres != 0 alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[svind]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop=FALSE][svind,,drop=FALSE]) ##save the indexes from all the SV in a vector (use unique!) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in a vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) if(ktype==4) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]) resv <- .Call("tron_optim", as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])), as.integer(li+lj), as.integer(ncol(x)), as.double(yd), as.double(K), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix svind <- resv[-(li+lj+1)][reind] > 0 alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][svind]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop = FALSE][svind,,drop = FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) ## store obj. 
values in vector obj(ret) <- c(obj(ret), resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) xd <- matrix(x[yd$ix,],nrow=dim(x)[1]) count <- 0 if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call("tron_optim", as.double(t(xd)), as.integer(nrow(xd)), as.integer(ncol(xd)), as.double(rep(yd$x-1,2)), as.double(K), as.integer(if (sparse) xd@ia else 0), as.integer(if (sparse) xd@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xd) + 1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which(alpha(ret)[,x]!=0)) xmatrix(ret) <- x obj(ret) <- resv[(nclass(ret)*nrow(xd) + 1)] names(alphaindex(ret)) <- lev(ret) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- weightlabels * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix,,drop=FALSE] count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call("tron_optim", as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(yd$x-1), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-(nrow(x)*(nclass(ret)-1)+1)],nrow(x))[reind,,drop=FALSE] xmatrix(ret) <- x<- x[reind,,drop=FALSE] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[(nrow(x)*(nclass(ret)-1)+1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call("smo_optim", as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(matrix(rep(1,m))), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call("smo_optim", as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex, ,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call("smo_optim", as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { if(ktype==4) K <- kernelMatrix(kernel,x) resv <- .Call("tron_optim", as.double(t(x)), as.integer(nrow(x)), as.integer(ncol(x)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[-(m + 1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m + 1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- kernel ymatrix(ret) <- y SVindex(ret) <- sort(unique(svindex),method="quick") nSV(ret) <- length(unique(svindex)) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NULL if(any(scaled)) scaling(ret) <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale) if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr"){ if (!is.null(scaling(ret)$y.scale)){ scal <- scaling(ret)$y.scale$"scaled:scale" fitted(ret) <- fitted(ret) # / scaling(ret)$y.scale$"scaled:scale" + scaling(ret)$y.scale$"scaled:center" } else scal <- 1 error(ret) <- drop(crossprod(fitted(ret) - y)/m) } } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(x[cind,],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, x[vgr[[i]],,drop=FALSE]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(x[cind,],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]],, drop=FALSE]) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(x[cind,],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]],,drop=FALSE]) if (!is.null(scaling(ret)$y.scale)) scal <- scaling(ret)$y.scale$"scaled:scale" else scal <- 1 cerror <- drop((scal^2)*crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][cind,],yd[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE ,cache = cache, prob.model = FALSE) else cret <- ksvm(x[c(indexes[[i]],indexes[[j]]), 
,drop=FALSE][cind,], as.factor(lev(ret)[y[c(indexes[[i]],indexes[[j]])][cind]]),
                                   type = type(ret), kernel = kernel, kpar = NULL, C = C, nu = nu, tol = tol,
                                   scaled = FALSE, cross = 0, fit = FALSE, class.weights = class.weights,
                                   cache = cache, prob.model = FALSE)
              yres <- c(yres, yd[vgr[[k]]])
              pres <- rbind(pres, predict(cret, x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][vgr[[k]],], type = "decision"))
            }
            prob.model(ret)[[p]] <- .probPlatt(pres, yres)
          }
        }
      }
      if(type(ret) == "eps-svr" || type(ret) == "nu-svr" || type(ret) == "eps-bsvr"){
        suppressWarnings(vgr <- split(sample(1:m,m),1:3))
        pres <- NULL
        for(i in 1:3)
        {
          cind <- unsplit(vgr[-i], factor(rep((1:3)[-i], unlist(lapply(vgr[-i],length)))))
          cret <- ksvm(x[cind,], y[cind], type = type(ret), kernel = kernel, kpar = NULL,
                       C = C, nu = nu, epsilon = epsilon, tol = tol, scaled = FALSE,
                       cross = 0, fit = FALSE, cache = cache, prob.model = FALSE)
          cres <- predict(cret, x[vgr[[i]],])
          if (!is.null(scaling(ret)$y.scale))
            cres <- cres * scaling(ret)$y.scale$"scaled:scale" + scaling(ret)$y.scale$"scaled:center"
          pres <- rbind(pres, cres)
        }
        pres[abs(pres) > (5*sd(pres))] <- 0
        prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1])
      }
    }
    return(ret)
  })

## kernelMatrix interface
setMethod("ksvm", signature(x = "kernelMatrix"),
          function (x, y = NULL, type = NULL, C = 1, nu = 0.2, epsilon = 0.1,
                    prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE,
                    cache = 40, tol = 0.001, shrinking = TRUE, ...)
          {
    sparse <- FALSE
    ## subsetting and na-handling for matrices
    ret <- new("ksvm")
    if (is.null(type))
      type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr"
    if(!is.null(type))
      type(ret) <- match.arg(type, c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc",
                                     "one-svc", "eps-svr", "eps-bsvr", "nu-svr"))
    ncols <- ncol(x)
    m <- nrows <- nrow(x)
    if (!is(y,"vector") && !is.factor(y) && !is(y,"matrix") && !(type(ret)=="one-svc"))
      stop("y must be a vector or a factor.")
    if(!(type(ret)=="one-svc"))
      if(is(y,"vector") | is(y,"factor")) ym <- length(y)
      else if(is(y,"matrix")) ym <- dim(y)[1]
      else stop("y must be a matrix or a vector")
    if ((type(ret) != "one-svc") && ym != m)
      stop("x and y don't match.")
    if(nu > 1 || nu < 0)
      stop("nu must be between 0 and 1.")
    weightlabels <- NULL
    nweights <- 0
    weight <- 0
    wl <- 0
    ## in case of classification: transform factors into integers
    if (type(ret) == "one-svc") # one-class classification --> set dummy
      y <- 1
    else if (is.factor(y)) {
      lev(ret) <- levels(y)
      y <- as.integer(y)
      if (!is.null(class.weights)) {
        if (is.null(names(class.weights)))
          stop("Class weights have to be specified along with their corresponding level names!")
        weightlabels <- match(names(class.weights), lev(ret))
        if (any(is.na(weightlabels)))
          stop("At least one level name is missing or misspelled.")
      }
    }
    else {
      if ((type(ret) == "C-svc" || type(ret) == "nu-svc" || type(ret) == "C-bsvc" ||
           type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer(y) != y))
        stop("dependent variable has to be of factor or integer type for classification mode.")
      if (type(ret) != "eps-svr" && type(ret) != "nu-svr" && type(ret) != "eps-bsvr")
        lev(ret) <- sort(unique(y))
    }
    ## initialize
    nclass(ret) <- length(unique(y))
    p <- 0
    svindex <- problem <- NULL
    sigma <- 0.1
    degree <- offset <- scale <- 1
    ktype <- 4
    prior(ret) <- list(NULL)
    ## C classification
    if(type(ret) == "C-svc"){
      indexes <- lapply(sort(unique(y)), function(kk) which(y == kk))
      for (i in 1:(nclass(ret)-1)) {
        jj <- i+1
        for(j in jj:nclass(ret)) {
          p <- p+1
          ## prepare data
          li <- length(indexes[[i]])
          lj <- length(indexes[[j]])
          if(y[indexes[[i]][1]] < y[indexes[[j]]][1])
{ yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(as.vector(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE])), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha svind <- tmpres > 0 alpha(ret)[p] <- list(tmpres[svind]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[svind, svind,drop=FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
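
## Aside: the loop above trains one binary machine per unordered class
## pair, so `p` ranges over choose(k, 2) problems and `problem[[p]]`
## (next statement) records which pair machine p solved. A minimal,
## self-contained sketch of the same enumeration; k = 4 is an invented
## example value and the block is never executed here:
if (FALSE) {
  k <- 4
  pairs <- list()
  p <- 0
  for (i in 1:(k - 1))
    for (j in (i + 1):k) {
      p <- p + 1
      pairs[[p]] <- c(i, j)          # machine p separates classes i and j
    }
  length(pairs) == choose(k, 2)      # TRUE: 6 binary machines for 4 classes
}
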
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) ##xd <- matrix(0,(li+lj),(li+lj)) ##xdi <- 1:(li+lj) <= li ##xd[xdi,rep(TRUE,li+lj)] <- x[indexes[[i]],c(indexes[[i]],indexes[[j]])] ##xd[xdi == FALSE,rep(TRUE,li+lj)] <- x[indexes[[j]],c(indexes[[i]],indexes[[j]])] if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[tmpres != 0]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres != 0]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[tmpres != 0,tmpres != 0,drop=FALSE]) ##save the indexes from all the SV in a vector (use unique!) 
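
## Aside: with a precomputed kernel matrix, prediction only needs the
## columns of the test-by-training kernel matrix that correspond to the
## support vectors, which is why SVindex() is tracked below. A hedged
## usage sketch, with random data invented for illustration and never
## executed here:
if (FALSE) {
  xtr <- matrix(rnorm(60), 30, 2)
  ytr <- factor(rep(c("a", "b"), each = 15))
  K <- kernelMatrix(rbfdot(sigma = 0.5), xtr)
  model <- ksvm(as.kernelMatrix(K), ytr, type = "nu-svc", nu = 0.2)
  xte <- matrix(rnorm(20), 10, 2)
  Kte <- kernelMatrix(rbfdot(sigma = 0.5), xte, xtr)
  predict(model, as.kernelMatrix(Kte[, SVindex(model), drop = FALSE]))
}
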
svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(rnorm(li+lj),li+lj,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][resv[-(li+lj+1)][reind] > 0]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][resv[-(li+lj+1)][reind] > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][resv[-(li+lj+1)][reind] > 0]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[resv > 0 ,resv > 0,drop = FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) ## store objective function values vector obj(ret) <- c(obj(ret), resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
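
## Aside: in the bound-constrained (C-bsvc) formulation solved by
## tron_optim the offset is folded into the optimization problem, so b is
## recovered above as minus the sum of each machine's signed coefficients
## (coef = alpha * y). A toy check of that bookkeeping with made-up
## coefficient values, never executed here:
if (FALSE) {
  coefs <- list(c(0.3, -0.7, 0.4), c(1.2, -1.2))
  -sapply(coefs, sum)   # one offset per binary machine: 0 when balanced
}
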
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- matrix(x[yd$ix,yd$ix],nrow=dim(x)[1]) count <- 0 xdd <- matrix(1,m,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(rep(yd$x-1,2)), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xdd)+1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which(alpha(ret)[,x]!=0)) ## xmatrix(ret) <- x names(alphaindex(ret)) <- lev(ret) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 obj(ret) <- resv[(nclass(ret)*nrow(xdd)+1)] param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- matrix(x[yd$ix,yd$ix],nrow=dim(x)[1]) count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) xdd <- matrix(1,m,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd$x-1), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-(nrow(x)*(nclass(ret)-1) + 1)],nrow(x))[reind,,drop=FALSE] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[(nrow(x)*(nclass(ret)-1) + 1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { xdd <- matrix(1,m,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(matrix(rep(1,m))), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { xdd <- matrix(1,m,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex ,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { xdd <- matrix(1,m,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { xdd <- matrix(1,m,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[-(m+1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m+1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- " Kernel matrix used as input." ymatrix(ret) <- y SVindex(ret) <- unique(sort(svindex,method="quick")) nSV(ret) <- length(unique(svindex)) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, as.kernelMatrix(x[,SVindex(ret),drop = FALSE])) else NULL if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr <- split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(as.kernelMatrix(x[cind,cind]), as.factor(lev(ret)[y[cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(as.kernelMatrix(x[cind,cind]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret), C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind]),yd[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache, prob.model=FALSE) else cret <- ksvm(as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind]), as.factor(lev(ret)[y[c(indexes[[i]],indexes[[j]])][cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, 
as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][vgr[[k]], cind, drop = FALSE][,SVindex(cret), drop = FALSE]), type = "decision"))
            }
            prob.model(ret)[[p]] <- .probPlatt(pres, yres)
          }
        }
      }
      if(type(ret) == "eps-svr" || type(ret) == "nu-svr" || type(ret) == "eps-bsvr"){
        suppressWarnings(vgr <- split(sample(1:m,m),1:3))
        pres <- NULL
        for(i in 1:3)
        {
          cind <- unsplit(vgr[-i], factor(rep((1:3)[-i], unlist(lapply(vgr[-i],length)))))
          cret <- ksvm(as.kernelMatrix(x[cind,cind]), y[cind], type = type(ret),
                       C = C, nu = nu, epsilon = epsilon, tol = tol,
                       cross = 0, fit = FALSE, cache = cache, prob.model = FALSE)
          cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE]))
          pres <- rbind(pres, predict(cret, as.kernelMatrix(x[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE]), type = "decision"))
        }
        pres[abs(pres) > (5*sd(pres))] <- 0
        prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1])
      }
    }
    return(ret)
  })

.classAgreement <- function (tab) {
  n <- sum(tab)
  if (!is.null(dimnames(tab))) {
    lev <- intersect(colnames(tab), rownames(tab))
    p0 <- sum(diag(tab[lev, lev])) / n
  } else {
    m <- min(dim(tab))
    p0 <- sum(diag(tab[1:m, 1:m])) / n
  }
  return(p0)
}

## List Interface
setMethod("ksvm", signature(x = "list"),
          function (x, y = NULL, type = NULL, kernel = "stringdot",
                    kpar = list(length = 4, lambda = 0.5),
                    C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE,
                    class.weights = NULL, cross = 0, fit = TRUE, cache = 40,
                    tol = 0.001, shrinking = TRUE, ..., na.action = na.omit)
          {
    ret <- new("ksvm")
    if (is.null(y))
      x <- na.action(x)
    n.action(ret) <- na.action
    sparse <- FALSE
    if (is.null(type))
      type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr"
    if(!is.null(type))
      type(ret) <- match.arg(type, c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc",
                                     "one-svc", "eps-svr", "eps-bsvr", "nu-svr"))
    m <- length(x)
    if(is.character(kernel)){
      kernel <- match.arg(kernel, c("rbfdot","polydot","tanhdot","vanilladot","laplacedot",
                                    "besseldot","anovadot","splinedot","stringdot"))
      if(is.character(kpar))
        if(kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot" ||
           kernel == "besseldot" || kernel == "anovadot" || kernel == "splinedot" ||
           kernel == "rbfdot" || kernel == "laplacedot") {
          stop("List interface supports only the stringdot kernel.")
        }
    }
    if(is(kernel,"kernel") && !is(kernel,"stringkernel"))
      stop("List interface supports only the stringdot kernel.")
    if(!is(kernel,"kernel")) {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
    if(!is(kernel,"kernel"))
      stop("kernel must inherit from class `kernel'")
    if (!is(y,"vector") && !is.factor(y) && !is(y,"matrix") && !(type(ret)=="one-svc"))
      stop("y must be a vector or a factor.")
    if(!(type(ret)=="one-svc"))
      if(is(y,"vector") | is(y,"factor")) ym <- length(y)
      else if(is(y,"matrix")) ym <- dim(y)[1]
      else stop("y must be a matrix or a vector")
    if ((type(ret) != "one-svc") && ym != m)
      stop("x and y don't match.")
    if(nu > 1 || nu < 0)
      stop("nu must be between 0 and 1.")
    weightlabels <- NULL
    nweights <- 0
    weight <- 0
    wl <- 0
    ## in case of classification: transform factors into integers
    if (type(ret) == "one-svc") # one-class classification --> set dummy
      y <- 1
    else if (is.factor(y)) {
      lev(ret) <- levels(y)
      y <- as.integer(y)
      if (!is.null(class.weights)) {
        if (is.null(names(class.weights)))
          stop("Class weights have to be specified along with their corresponding level names!")
        weightlabels <- match(names(class.weights), lev(ret))
        if (any(is.na(weightlabels)))
          stop("At least one level
name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize if (type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") nclass(ret) <- length (unique(y)) p <- 0 K <- 0 svindex <- problem <- NULL ktype <- 4 prior(ret) <- list(NULL) sigma <- 0.1 degree <- offset <- scale <- 1 ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha alpha(ret)[p] <- list(tmpres[tmpres > 0]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][tmpres > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres>0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][tmpres > 0]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) obj(ret) <- c(obj(ret),resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
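
## Aside: the list interface feeds raw character sequences to a string
## kernel; all other kernels are rejected above. A hedged usage sketch
## with invented toy sequences and labels, never executed here:
if (FALSE) {
  seqs <- list("GATTACA", "GATTAGA", "CCCGGGA", "CCCGTGA")
  labs <- factor(c("promoter", "promoter", "other", "other"))
  model <- ksvm(seqs, labs, type = "C-svc", kernel = "stringdot",
                kpar = list(length = 4, lambda = 0.5), C = 1)
  predict(model, seqs)
}
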
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[tmpres != 0]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres!=0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][tmpres != 0]) ##save the indexes from all the SV in a vector (use unique!) 
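
## Aside: the solver sees the two classes concatenated, so its output is
## ordered like c(indexes[[i]], indexes[[j]]); sorting that index vector
## with index.return = TRUE gives the permutation `reind` that restores
## the original input order. A small sketch of the trick, never executed:
if (FALSE) {
  ord <- c(4, 9, 1, 7)                       # solver-side ordering
  reind <- sort(ord, index.return = TRUE)$ix
  ord[reind]                                 # 1 4 7 9: input order restored
}
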
svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][resv[-(li+lj+1)][reind] > 0]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][resv[-(li+lj+1)][reind] > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][resv[-(li+lj+1)][reind] > 0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][resv[-(li+lj+1)][reind] > 0]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) obj(ret) <- c(obj(ret),resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
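
## Aside: class.weights must be a named vector so that each weight can be
## matched to its factor level; the per-class cost then becomes
## class.weights[weightlabels] * C, as computed above. A hedged usage
## sketch with invented data, never executed here:
if (FALSE) {
  x <- matrix(rnorm(40), 20, 2)
  y <- factor(c(rep("rare", 4), rep("common", 16)))
  ksvm(x, y, type = "C-bsvc", kernel = "rbfdot",
       class.weights = c(rare = 5, common = 1), C = 1)
}
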
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix] count <- 0 K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(rep(yd$x-1,2)), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xdd) + 1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(1:nclass(ret), function(x) which(alpha(ret)[,x]!=0)) names(alphaindex(ret)) <- lev(ret) xmatrix(ret) <- x svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 obj(ret) <- resv[(nclass(ret)*nrow(xdd) + 1)] param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- weightlabels * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix] count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd$x-1), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-((nclass(ret)-1)*length(x)+1)],length(x))[reind,,drop=FALSE] xmatrix(ret) <- x<- x[reind] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[((nclass(ret)-1)*length(x)+1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(matrix(rep(1,m))), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres !=0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[-(m+1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m+1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- kernel ymatrix(ret) <- y SVindex(ret) <- unique(svindex) nSV(ret) <- length(unique(svindex)) if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") nclass(ret) <- m if(type(ret)=="one-svc") nclass(ret) <- 1 if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) { if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") & nclass(ret) > 2) predict(ret, x) else if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc"||type(ret)=="spoc-bsvc"||type(ret)=="kbb-bsvc")) predict(ret,as.kernelMatrix(K[reind,reind][,SVindex(ret), drop=FALSE])) else predict(ret,as.kernelMatrix(K[,SVindex(ret), drop=FALSE])) } else NULL if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(!((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") & nclass(ret) > 2)) { if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc"||type(ret)=="spoc-bsvc"||type(ret)=="kbb-bsvc")) K <- as.kernelMatrix(K[reind,reind]) if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr <- split(sample(1:dim(K)[1],dim(K)[1]),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(as.kernelMatrix(K[cind,cind]),as.factor(lev(ret)[y[cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(as.kernelMatrix(K[cind,cind]), type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type=type(ret), C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) cret <- 
ksvm(as.kernelMatrix(as.kernelMatrix(K[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind])), yd[cind], type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, as.kernelMatrix(K[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][vgr[[k]], cind,drop = FALSE][,SVindex(cret),drop = FALSE]),type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) pres <- NULL for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type=type(ret), C=C, nu=nu, epsilon=epsilon, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE])) pres <- rbind(pres,predict(cret, as.kernelMatrix(K[vgr[[i]],cind , drop = FALSE][,SVindex(cret) ,drop = FALSE]),type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } } else{ if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(x[cind],y[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(x[cind],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, x[vgr[[i]]]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(x[cind],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]]]) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m)/cross + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(x[c(indexes[[i]], indexes[[j]])][cind],yd[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache, prob.model=FALSE) else cret <- ksvm(x[c(indexes[[i]], indexes[[j]])][cind],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar 
= NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, x[c(indexes[[i]], indexes[[j]])][vgr[[k]]],type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(x[cind],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]]]) pres <- rbind(pres,predict(cret, x[vgr[[i]]],type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } } return(ret) }) ##**************************************************************# ## predict for matrix, data.frame input setMethod("predict", signature(object = "ksvm"), function (object, newdata, type = "response", coupler = "minpair") { type <- match.arg(type,c("response","probabilities","votes","decision")) if (missing(newdata) && type=="response" & !is.null(fitted(object))) return(fitted(object)) else if(missing(newdata)) stop("Missing data !") if(!is(newdata,"list")){ if (!is.null(terms(object)) & !is(newdata,"kernelMatrix")) { if(!is.matrix(newdata)) newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object)) } else newdata <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata) newnrows <- nrow(newdata) newncols <- ncol(newdata) if(!is(newdata,"kernelMatrix") && !is.null(xmatrix(object))){ if(is(xmatrix(object),"list") && is(xmatrix(object)[[1]],"matrix")) oldco <- ncol(xmatrix(object)[[1]]) if(is(xmatrix(object),"matrix")) oldco <- ncol(xmatrix(object)) if (oldco != newncols) stop ("test vector does not match model !") } } else newnrows <- length(newdata) p <- 0 if (is.list(scaling(object))) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale") if(type == "response" || type =="decision" || type=="votes") { if(type(object)=="C-svc"||type(object)=="nu-svc"||type(object)=="C-bsvc") { predres <- 1:newnrows if(type=="decision") votematrix <- matrix(0,nclass(object)*(nclass(object)-1)/2,newnrows) else votematrix <- matrix(0,nclass(object),newnrows) for(i in 1:(nclass(object)-1)) { jj <- i+1 for(j in jj:nclass(object)) { p <- p+1 if(is(newdata,"kernelMatrix")) ret <- newdata[,which(SVindex(object)%in%alphaindex(object)[[p]]), drop=FALSE] %*% coef(object)[[p]] - b(object)[p] else ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[[p]],coef(object)[[p]]) - b(object)[p] if(type=="decision") votematrix[p,] <- ret else{ votematrix[i,ret<0] <- votematrix[i,ret<0] + 1 votematrix[j,ret>0] <- votematrix[j,ret>0] + 1 } } } if(type == "decision") predres <- t(votematrix) else predres <- sapply(predres, function(x) which.max(votematrix[,x])) } if(type(object) == "spoc-svc") { predres <- 1:newnrows votematrix <- matrix(0,nclass(object),newnrows) for(i in 1:nclass(object)){ if(is(newdata,"kernelMatrix")) votematrix[i,] <- newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% coef(object)[[i]] else if (is(newdata,"list")) votematrix[i,] <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],coef(object)[[i]]) 
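## otherwise (plain matrix input): the class-i score is the kernel expansion over that class' support vectors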
else votematrix[i,] <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],coef(object)[[i]]) } predres <- sapply(predres, function(x) which.max(votematrix[,x])) } if(type(object) == "kbb-svc") { predres <- 1:newnrows votematrix <- matrix(0,nclass(object),newnrows) A <- rowSums(alpha(object)) for(i in 1:nclass(object)) { for(k in (1:i)[-i]) if(is(newdata,"kernelMatrix")) votematrix[k,] <- votematrix[k,] - (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% alpha(object)[,k][alphaindex(object)[[i]]] + sum(alpha(object)[,k][alphaindex(object)[[i]]])) else if (is(newdata,"list")) votematrix[k,] <- votematrix[k,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],alpha(object)[,k][alphaindex(object)[[i]]]) + sum(alpha(object)[,k][alphaindex(object)[[i]]])) else votematrix[k,] <- votematrix[k,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],alpha(object)[,k][alphaindex(object)[[i]]]) + sum(alpha(object)[,k][alphaindex(object)[[i]]])) if(is(newdata,"kernelMatrix")) votematrix[i,] <- votematrix[i,] + (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% A[alphaindex(object)[[i]]] + sum(A[alphaindex(object)[[i]]])) else if (is(newdata,"list")) votematrix[i,] <- votematrix[i,] + (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],A[alphaindex(object)[[i]]]) + sum(A[alphaindex(object)[[i]]])) else votematrix[i,] <- votematrix[i,] + (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],A[alphaindex(object)[[i]]]) + sum(A[alphaindex(object)[[i]]])) if(i <= (nclass(object)-1)) for(kk in i:(nclass(object)-1)) if(is(newdata,"kernelMatrix")) votematrix[kk+1,] <- votematrix[kk+1,] - (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% alpha(object)[,kk][alphaindex(object)[[i]]] + sum(alpha(object)[,kk][alphaindex(object)[[i]]])) else if (is(newdata,"list")) votematrix[kk+1,] <- votematrix[kk+1,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],alpha(object)[,kk][alphaindex(object)[[i]]]) + sum(alpha(object)[,kk][alphaindex(object)[[i]]])) else votematrix[kk+1,] <- votematrix[kk+1,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],alpha(object)[,kk][alphaindex(object)[[i]]]) + sum(alpha(object)[,kk][alphaindex(object)[[i]]])) } predres <- sapply(predres, function(x) which.max(votematrix[,x])) } } if(type == "probabilities") { if(is.null(prob.model(object)[[1]])) stop("ksvm object contains no probability model. 
Make sure you set the parameter prob.model in ksvm during training.")

    if(type(object)=="C-svc"||type(object)=="nu-svc"||type(object)=="C-bsvc")
      {
        binprob <- matrix(0, newnrows, nclass(object)*(nclass(object) - 1)/2)
        for(i in 1:(nclass(object)-1))
          {
            jj <- i+1
            for(j in jj:nclass(object))
              {
                p <- p+1
                if(is(newdata,"kernelMatrix"))
                  binprob[,p] <- 1 - .SigmoidPredict(as.vector(newdata[,which(SVindex(object)%in%alphaindex(object)[[p]]), drop=FALSE] %*% coef(object)[[p]] - b(object)[p]), prob.model(object)[[p]]$A, prob.model(object)[[p]]$B)
                else
                  binprob[,p] <- 1 - .SigmoidPredict(as.vector(kernelMult(kernelf(object),newdata,xmatrix(object)[[p]],coef(object)[[p]]) - b(object)[p]), prob.model(object)[[p]]$A, prob.model(object)[[p]]$B)
              }
          }
        multiprob <- couple(binprob, coupler = coupler)
      }
    else
      stop("probability estimates only supported for C-svc, C-bsvc and nu-svc")
  }

  if(type(object) == "one-svc")
    {
      if(is(newdata,"kernelMatrix"))
        ret <- newdata %*% coef(object) - b(object)
      else
        ret <- kernelMult(kernelf(object),newdata,xmatrix(object),coef(object)) - b(object)
      ## one-class-classification: return TRUE/FALSE (probabilities ?)
      if(type=="decision")
        return(ret)
      else
        {
          ret[ret>0]<-1
          return(ret == 1)
        }
    }
  else {
    if(type(object)=="eps-svr"||type(object)=="nu-svr"||type(object)=="eps-bsvr")
      {
        if(is(newdata,"kernelMatrix"))
          predres <- newdata %*% coef(object) - b(object)
        else
          predres <- kernelMult(kernelf(object),newdata,xmatrix(object),coef(object)) - b(object)
      }
    else {
      ## classification & votes : return votematrix
      if(type == "votes")
        return(votematrix)

      ## classification & probabilities : return probability matrix
      if(type == "probabilities")
        {
          colnames(multiprob) <- lev(object)
          return(multiprob)
        }

      if(is.numeric(lev(object)) && type == "response")
        return(lev(object)[predres])

      if (is.character(lev(object)) && type!="decision")
        {
          ## classification & type response: return factors
          if(type == "response")
            return(factor (lev(object)[predres], levels = lev(object)))
        }
    }
  }

  if (!is.null(scaling(object)$y.scale) & !is(newdata,"kernelMatrix") & !is(newdata,"list"))
    ## return raw values, possibly scaled back
    return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center")
  else
    ## else: return raw values
    return(predres)
})

#****************************************************************************************#

setMethod("show","ksvm",
function(object){
  cat("Support Vector Machine object of class \"ksvm\"","\n")
  cat("\n")
  cat(paste("SV type:", type(object)))
  switch(type(object),
         "C-svc" = cat(paste(" (classification)", "\n")),
         "nu-svc" = cat(paste(" (classification)", "\n")),
         "C-bsvc" = cat(paste(" (classification)", "\n")),
         "one-svc" = cat(paste(" (novelty detection)", "\n")),
         "spoc-svc" = cat(paste(" (classification)", "\n")),
         "kbb-svc" = cat(paste(" (classification)", "\n")),
         "eps-svr" = cat(paste(" (regression)","\n")),
         "nu-svr" = cat(paste(" (regression)","\n"))
         )
  switch(type(object),
         "C-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "nu-svc" = cat(paste(" parameter : nu =", param(object)$nu, "\n")),
         "C-bsvc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "one-svc" = cat(paste(" parameter : nu =", param(object)$nu, "\n")),
         "spoc-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "kbb-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "eps-svr" = cat(paste(" parameter : epsilon =",param(object)$epsilon, " cost C =", param(object)$C,"\n")),
         "nu-svr" = cat(paste(" parameter : epsilon =", param(object)$epsilon, " nu =",
param(object)$nu,"\n")) ) cat("\n") show(kernelf(object)) cat(paste("\nNumber of Support Vectors :", nSV(object),"\n")) cat("\nObjective Function Value :", round(obj(object),4),"\n") ## if(type(object)=="C-svc" || type(object) == "nu-svc") ## cat(paste("Margin width :",margin(object),"\n")) if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),6),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),6),"\n") if(!is.null(prob.model(object)[[1]])&&(type(object)=="eps-svr" ||type(object)=="nu-svr"||type(object)=="eps-bsvr")) cat("Laplace distr. width :",round(prob.model(object)[[1]],6),"\n") if(!is.null(prob.model(object)[[1]]) & (type(object) == "C-svc"| type(object) == "nu-svc"| type(object) == "C-bsvc")) cat("Probability model included.","\n") ##train error & loss }) setMethod("plot", signature(x = "ksvm", y = "missing"), function(x, data = NULL, grid = 50, slice = list(), ...) { if (type(x) =="C-svc" || type(x) == "nu-svc") { if(nclass(x) > 2) stop("plot function only supports binary classification") if (!is.null(terms(x))&&!is.null(data)) { if(!is.matrix(data)) sub <- model.matrix(delete.response(terms(x)), as.data.frame(data), na.action = n.action(x)) } else if(!is.null(data)) sub <- as.matrix(data) else sub <- xmatrix(x)[[1]] ## sub <- sub[,!colnames(xmatrix(x)[[1]])%in%names(slice)] xr <- seq(min(sub[,2]), max(sub[,2]), length = grid) yr <- seq(min(sub[,1]), max(sub[,1]), length = grid) sc <- 0 # if(is.null(data)) # { # sc <- 1 # data <- xmatrix(x)[[1]] # } if(is.data.frame(data) || !is.null(terms(x))){ lis <- c(list(yr), list(xr), slice) names(lis)[1:2] <- setdiff(colnames(sub),names(slice)) new <- expand.grid(lis)[,labels(terms(x))] } else new <- expand.grid(xr,yr) if(sc== 1) scaling(x) <- NULL preds <- predict(x, new ,type = "decision") if(is.null(terms(x))) xylb <- colnames(sub) else xylb <- names(lis) lvl <- 37 mymax <- max(abs(preds)) mylevels <- pretty(c(0, mymax), 15) nl <- length(mylevels)-2 mycols <- c(hcl(0, 100 * (nl:0/nl)^1.3, 90 - 40 *(nl:0/nl)^1.3), rev(hcl(260, 100 * (nl:0/nl)^1.3, 90 - 40 *(nl:0/nl)^1.3))) mylevels <- c(-rev(mylevels[-1]), mylevels) index <- max(which(mylevels < min(preds))):min(which(mylevels > max(preds))) mycols <- mycols[index] mylevels <- mylevels[index] #FIXME# previously the plot code assumed that the y values are either #FIXME# -1 or 1, but this is not generally true. If generated from a #FIXME# factor, they are typically 1 and 2. Maybe ymatrix should be #FIXME# changed? ymat <- ymatrix(x) ymean <- mean(unique(ymat)) filled.contour(xr, yr, matrix(as.numeric(preds), nrow = length(xr), byrow = TRUE), col = mycols, levels = mylevels, plot.axes = { axis(1) axis(2) if(!is.null(data)){ points(sub[-SVindex(x),2], sub[-SVindex(x),1], pch = ifelse(ymat[-SVindex(x)] < ymean, 2, 1)) points(sub[SVindex(x),2], sub[SVindex(x),1], pch = ifelse(ymat[SVindex(x)] < ymean, 17, 16))} else{ ## points(sub[-SVindex(x),], pch = ifelse(ymat[-SVindex(x)] < ymean, 2, 1)) points(sub, pch = ifelse(ymat[SVindex(x)] < ymean, 17, 16)) }}, nlevels = lvl, plot.title = title(main = "SVM classification plot", xlab = xylb[2], ylab = xylb[1]), ... 
) } else { stop("Only plots of classification ksvm objects supported") } }) setGeneric(".probPlatt", function(deci, yres) standardGeneric(".probPlatt")) setMethod(".probPlatt",signature(deci="ANY"), function(deci,yres) { if (is.matrix(deci)) deci <- as.vector(deci) if (!is.vector(deci)) stop("input should be matrix or vector") yres <- as.vector(yres) ## Create label and count priors boolabel <- yres >= 0 prior1 <- sum(boolabel) m <- length(yres) prior0 <- m - prior1 ## set parameters (should be on the interface I guess) maxiter <- 100 minstep <- 1e-10 sigma <- 1e-3 eps <- 1e-5 ## Construct target support hiTarget <- (prior1 + 1)/(prior1 + 2) loTarget <- 1/(prior0 + 2) length <- prior1 + prior0 t <- rep(loTarget, m) t[boolabel] <- hiTarget ##Initial Point & Initial Fun Value A <- 0 B <- log((prior0 + 1)/(prior1 + 1)) fval <- 0 fApB <- deci*A + B bindex <- fApB >= 0 p <- q <- rep(0,m) fval <- sum(t[bindex]*fApB[bindex] + log(1 + exp(-fApB[bindex]))) fval <- fval + sum((t[!bindex] - 1)*fApB[!bindex] + log(1+exp(fApB[!bindex]))) for (it in 1:maxiter) { h11 <- h22 <- sigma h21 <- g1 <- g2 <- 0 fApB <- deci*A + B bindex <- fApB >= 0 p[bindex] <- exp(-fApB[bindex])/(1 + exp(-fApB[bindex])) q[bindex] <- 1/(1+exp(-fApB[bindex])) bindex <- fApB < 0 p[bindex] <- 1/(1 + exp(fApB[bindex])) q[bindex] <- exp(fApB[bindex])/(1 + exp(fApB[bindex])) d2 <- p*q h11 <- h11 + sum(d2*deci^2) h22 <- h22 + sum(d2) h21 <- h21 + sum(deci*d2) d1 <- t - p g1 <- g1 + sum(deci*d1) g2 <- g2 + sum(d1) ## Stopping Criteria if (abs(g1) < eps && abs(g2) < eps) break ## Finding Newton Direction -inv(t(H))%*%g det <- h11*h22 - h21^2 dA <- -(h22*g1 - h21*g2) / det dB <- -(-h21*g1 + h11*g2) / det gd <- g1*dA + g2*dB ## Line Search stepsize <- 1 while(stepsize >= minstep) { newA <- A + stepsize * dA newB <- B + stepsize * dB ## New function value newf <- 0 fApB <- deci * newA + newB bindex <- fApB >= 0 newf <- sum(t[bindex] * fApB[bindex] + log(1 + exp(-fApB[bindex]))) newf <- newf + sum((t[!bindex] - 1)*fApB[!bindex] + log(1 + exp(fApB[!bindex]))) ## Check decrease if (newf < (fval + 0.0001 * stepsize * gd)) { A <- newA B <- newB fval <- newf break } else stepsize <- stepsize/2 } if (stepsize < minstep) { cat("line search fails", A, B, g1, g2, dA, dB, gd) ret <- .SigmoidPredict(deci, A, B) return(ret) } } if(it >= maxiter -1) cat("maximum number of iterations reached",g1,g2) ret <- list(A=A, B=B) return(ret) }) ## Sigmoid predict function .SigmoidPredict <- function(deci, A, B) { fApB <- deci*A +B k <- length(deci) ret <- rep(0,k) bindex <- fApB >= 0 ret[bindex] <- exp(-fApB[bindex])/(1 + exp(-fApB[bindex])) ret[!bindex] <- 1/(1 + exp(fApB[!bindex])) return(ret) } kernlab/R/ipop.R0000644000176000001440000002544511304023134013214 0ustar ripleyusers##ipop solves the quadratic programming problem ##minimize c' * primal + 1/2 primal' * H * primal ##subject to b <= A*primal <= b + r ## l <= x <= u ## d is the optimizer itself ##returns primal and dual variables (i.e. x and the Lagrange ##multipliers for b <= A * primal <= b + r) ##for additional documentation see ## R. Vanderbei ## LOQO: an Interior Point Code for Quadratic Programming, 1992 ## Author: R version Alexandros Karatzoglou, orig. matlab Alex J. 
Smola ## Created: 12/12/97 ## R Version: 12/08/03 ## Updated: 13/10/05 ## This code is released under the GNU Public License setGeneric("ipop",function(c, H, A, b, l, u, r, sigf=7, maxiter=40, margin=0.05, bound=10, verb=0) standardGeneric("ipop")) setMethod("ipop",signature(H="matrix"), function(c, H, A, b, l, u, r, sigf=7, maxiter=40, margin=0.05, bound=10, verb=0) { if(!is.matrix(H)) stop("H must be a matrix") if(!is.matrix(A)&&!is.vector(A)) stop("A must be a matrix or a vector") if(!is.matrix(c)&&!is.vector(c)) stop("c must be a matrix or a vector") if(!is.matrix(l)&&!is.vector(l)) stop("l must be a matrix or a vector") if(!is.matrix(u)&&!is.vector(u)) stop("u must be a matrix or a vector") n <- dim(H)[1] ## check for a decomposed H matrix if(n == dim(H)[2]) smw <- 0 if(n > dim(H)[2]) smw <- 1 if(n < dim(H)[2]) { smw <- 1 n <- dim(H)[2] H <- t(H) } if (is.vector(A)) A <- matrix(A,1) m <- dim(A)[1] primal <- rep(0,n) if (missing(b)) bvec <- rep(0, m) ## if(n !=nrow(H)) ## stop("H matrix is not symmetric") if (n != length(c)) stop("H and c are incompatible!") if (n != ncol(A)) stop("A and c are incompatible!") if (m != length(b)) stop("A and b are incompatible!") if(n !=length(u)) stop("u is incopatible with H") if(n !=length(l)) stop("l is incopatible with H") c <- matrix(c) l <- matrix(l) u <- matrix(u) m <- nrow(A) n <- ncol(A) H.diag <- diag(H) if(smw == 0) H.x <- H else if (smw == 1) H.x <- t(H) b.plus.1 <- max(svd(b)$d) + 1 c.plus.1 <- max(svd(c)$d) + 1 one.x <- -matrix(1,n,1) one.y <- -matrix(1,m,1) ## starting point if(smw == 0) diag(H.x) <- H.diag + 1 else smwn <- dim(H)[2] H.y <- diag(1,m) c.x <- c c.y <- b ## solve the system [-H.x A' A H.y] [x, y] = [c.x c.y] if(smw == 0) { AP <- matrix(0,m+n,m+n) xp <- 1:(m+n) <= n AP[xp,xp] <- -H.x AP[xp == FALSE,xp] <- A AP[xp,xp == FALSE] <- t(A) AP[xp == FALSE, xp== FALSE] <- H.y s.tmp <- solve(AP,c(c.x,c.y)) x <- s.tmp[1:n] y <- s.tmp[-(1:n)] } else { V <- diag(smwn) smwinner <- chol(V + crossprod(H)) smwa1 <- t(A) smwc1 <- c.x smwa2 <- smwa1 - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwa1)))) smwc2 <- smwc1 - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1)))) y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2) x <- smwa2 %*% y - smwc2 } g <- pmax(abs(x - l), bound) z <- pmax(abs(x), bound) t <- pmax(abs(u - x), bound) s <- pmax(abs(x), bound) v <- pmax(abs(y), bound) w <- pmax(abs(y), bound) p <- pmax(abs(r - w), bound) q <- pmax(abs(y), bound) mu <- as.vector(crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n)) sigfig <- 0 counter <- 0 alfa <- 1 if (verb > 0) # print at least one status report cat("Iter PrimalInf DualInf SigFigs Rescale PrimalObj DualObj","\n") while (counter < maxiter) { ## update the iteration counter counter <- counter + 1 ## central path (predictor) if(smw == 0) H.dot.x <- H %*% x else if (smw == 1) H.dot.x <- H %*% crossprod(H,x) rho <- b - A %*% x + w nu <- l - x + g tau <- u - x - t alpha <- r - w - p sigma <- c - crossprod(A, y) - z + s + H.dot.x beta <- y + q - v gamma.z <- - z gamma.w <- - w gamma.s <- - s gamma.q <- - q ## instrumentation x.dot.H.dot.x <- crossprod(x, H.dot.x) primal.infeasibility <- max(svd(rbind(rho, tau, matrix(alpha), nu))$d)/ b.plus.1 dual.infeasibility <- max(svd(rbind(sigma,t(t(beta))))$d) / c.plus.1 primal.obj <- crossprod(c,x) + 0.5 * x.dot.H.dot.x dual.obj <- crossprod(b,y) - 0.5 * x.dot.H.dot.x + crossprod(l, z) - crossprod(u,s) - crossprod(r,q) old.sigfig <- sigfig sigfig <- max(-log10(abs(primal.obj - dual.obj)/(abs(primal.obj) 
+ 1)), 0) if (sigfig >= sigf) break if (verb > 0) # final report cat( counter, "\t", signif(primal.infeasibility,6), signif(dual.infeasibility,6), sigfig, alfa, primal.obj, dual.obj,"\n") ## some more intermediate variables (the hat section) hat.beta <- beta - v * gamma.w / w hat.alpha <- alpha - p * gamma.q / q hat.nu <- nu + g * gamma.z / z hat.tau <- tau - t * gamma.s / s ## the diagonal terms d <- z / g + s / t e <- 1 / (v / w + q / p) ## initialization before the big cholesky if (smw == 0) diag(H.x) <- H.diag + d diag(H.y) <- e c.x <- sigma - z * hat.nu / g - s * hat.tau / t c.y <- rho - e * (hat.beta - q * hat.alpha / p) ## and solve the system [-H.x A' A H.y] [delta.x, delta.y] <- [c.x c.y] if(smw == 0){ AP[xp,xp] <- -H.x AP[xp == FALSE, xp== FALSE] <- H.y s1.tmp <- solve(AP,c(c.x,c.y)) delta.x<-s1.tmp[1:n] ; delta.y <- s1.tmp[-(1:n)] } else { V <- diag(smwn) smwinner <- chol(V + chunkmult(t(H),2000,d)) smwa1 <- t(A) smwa1 <- smwa1 / d smwc1 <- c.x / d smwa2 <- t(A) - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwa1)))) smwa2 <- smwa2 / d smwc2 <- (c.x - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1)))))/d delta.y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2) delta.x <- smwa2 %*% delta.y - smwc2 } ## backsubstitution delta.w <- - e * (hat.beta - q * hat.alpha / p + delta.y) delta.s <- s * (delta.x - hat.tau) / t delta.z <- z * (hat.nu - delta.x) / g delta.q <- q * (delta.w - hat.alpha) / p delta.v <- v * (gamma.w - delta.w) / w delta.p <- p * (gamma.q - delta.q) / q delta.g <- g * (gamma.z - delta.z) / z delta.t <- t * (gamma.s - delta.s) / s ## compute update step now (sebastian's trick) alfa <- - (1 - margin) / min(c(delta.g / g, delta.w / w, delta.t / t, delta.p / p, delta.z / z, delta.v / v, delta.s / s, delta.q / q, -1)) newmu <- (crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n)) newmu <- mu * ((alfa - 1) / (alfa + 10))^2 gamma.z <- mu / g - z - delta.z * delta.g / g gamma.w <- mu / v - w - delta.w * delta.v / v gamma.s <- mu / t - s - delta.s * delta.t / t gamma.q <- mu / p - q - delta.q * delta.p / p ## some more intermediate variables (the hat section) hat.beta <- beta - v * gamma.w / w hat.alpha <- alpha - p * gamma.q / q hat.nu <- nu + g * gamma.z / z hat.tau <- tau - t * gamma.s / s ## initialization before the big cholesky ##for ( i in 1 : n H.x(i,i) <- H.diag(i) + d(i) ) { ##H.y <- diag(e) c.x <- sigma - z * hat.nu / g - s * hat.tau / t c.y <- rho - e * (hat.beta - q * hat.alpha / p) ## and solve the system [-H.x A' A H.y] [delta.x, delta.y] <- [c.x c.y] if (smw == 0) { AP[xp,xp] <- -H.x AP[xp == FALSE, xp== FALSE] <- H.y s1.tmp <- solve(AP,c(c.x,c.y)) delta.x<-s1.tmp[1:n] ; delta.y<-s1.tmp[-(1:n)] } else if (smw == 1) { smwc1 <- c.x / d smwc2 <- (c.x - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1))))) / d delta.y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2) delta.x <- smwa2 %*% delta.y - smwc2 } ## backsubstitution delta.w <- - e * (hat.beta - q * hat.alpha / p + delta.y) delta.s <- s * (delta.x - hat.tau) / t delta.z <- z * (hat.nu - delta.x) / g delta.q <- q * (delta.w - hat.alpha) / p delta.v <- v * (gamma.w - delta.w) / w delta.p <- p * (gamma.q - delta.q) / q delta.g <- g * (gamma.z - delta.z) / z delta.t <- t * (gamma.s - delta.s) / s ## compute the updates alfa <- - (1 - margin) / min(c(delta.g / g, delta.w / w, delta.t / t, delta.p / p, delta.z / z, delta.v / v, delta.s / s, delta.q / q, -1)) x <- x + delta.x * alfa g <- g + delta.g * alfa w <- w + delta.w * alfa t <- t + delta.t * alfa p <- 
p + delta.p * alfa y <- y + delta.y * alfa z <- z + delta.z * alfa v <- v + delta.v * alfa s <- s + delta.s * alfa q <- q + delta.q * alfa ## these two lines put back in ? ## mu <- (crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n)) ## mu <- mu * ((alfa - 1) / (alfa + 10))^2 mu <- newmu } if (verb > 0) ## final report cat( counter, primal.infeasibility, dual.infeasibility, sigfig, alfa, primal.obj, dual.obj) ret <- new("ipop") ## repackage the results primal(ret) <- x dual(ret) <- drop(y) if ((sigfig > sigf) & (counter < maxiter)) how(ret) <- 'converged' else { ## must have run out of counts if ((primal.infeasibility > 10e5) & (dual.infeasibility > 10e5)) how(ret) <- 'primal and dual infeasible' if (primal.infeasibility > 10e5) how(ret) <- 'primal infeasible' if (dual.infeasibility > 10e5) how(ret) <- 'dual infeasible' else ## don't really know how(ret) <- 'slow convergence, change bound?' } ret }) setGeneric("chunkmult",function(Z, csize, colscale) standardGeneric("chunkmult")) setMethod("chunkmult",signature(Z="matrix"), function(Z, csize, colscale) { n <- dim(Z)[1] m <- dim(Z)[2] d <- sqrt(colscale) nchunks <- ceiling(m/csize) res <- matrix(0,n,n) for( i in 1:nchunks) { lowerb <- (i - 1) * csize + 1 upperb <- min(i * csize, m) buffer <- t(Z[,lowerb:upperb,drop = FALSE]) bufferd <- d[lowerb:upperb] buffer <- buffer / bufferd res <- res + crossprod(buffer) } return(res) }) kernlab/R/rvm.R0000644000176000001440000004145112560371302013054 0ustar ripleyusers## relevance vector machine ## author : alexandros setGeneric("rvm", function(x, ...) standardGeneric("rvm")) setMethod("rvm",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") ret <- rvm(x, y, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("rvm",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- rvm(x, ...) ret }) setMethod("rvm",signature(x="list"), function (x, y, type = "regression", kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 5, var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") K <- kernelMatrix(kernel,x) ret <- rvm(x=K, y=y, kernel=kernel, alpha = alpha, var= var, var.fix = var.fix, iterations = iterations, verbosity = verbosity, tol = tol, minmaxdiff=minmaxdiff,cross=cross,fit=fit, na.action=na.action) kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) setMethod("rvm",signature(x="matrix"), function (x, y, type = "regression", kernel = "rbfdot", kpar = "automatic", alpha = ncol(as.matrix(x)), var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... 
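# optional: row subset and NA handling (default na.omit), as in the other kernlab fitters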
,subset ,na.action = na.omit) { ## subsetting and na-handling for matrices ret <- new("rvm") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- "regression" # in case of classification: transform factors into integers if (is.factor(y)) { stop("classification not supported with rvm, you can use ksvm(), lssvm() or gausspr()") } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("classification not supported with rvm, you can use ksvm(), lssvm() or gausspr()") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(rvm(as.kernelMatrix(x), y = y,type = type, alpha = alpha, var = var, # variance var.fix = var.fix, # fixed variance? iterations = iterations, # no. of iterations verbosity = verbosity, tol = tol, minmaxdiff = minmaxdiff, cross = cross, fit = fit ,subset ,na.action = na.omit, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(alpha) == m) thetavec <- 1/alpha else if (length(alpha) == 1) thetavec <- rep(1/alpha, m) else stop("length of initial alpha vector is wrong (has to be one or equal with number of train data") wvec <- rep(1, m) piter <- iterations*0.4 if (type(ret) == "regression") { K <- kernelMatrix(kernel, x) diag(K) <- diag(K)+ 10e-7 Kml <- crossprod(K, y) for (i in 1:iterations) { nzindex <- thetavec > tol thetavec [!nzindex] <- wvec [!nzindex] <- 0 Kr <- K [ ,nzindex, drop = FALSE] thetatmp <- thetavec[nzindex] n <- sum (nzindex) Rinv <- backsolve(chol(crossprod(Kr)/var + diag(1/thetatmp)),diag(1,n)) ## compute the new wvec coefficients wvec [nzindex] <- (Rinv %*% (crossprod(Rinv, Kml [nzindex])))/var diagSigma <- rowSums(Rinv^2) ## error err <- sum ((y - Kr %*% wvec [nzindex])^2) if(var < 2e-9) { warning("Model might be overfitted") break } ## log some information if (verbosity > 0) { log.det.Sigma.inv <- - 2 * sum (log (diag (Rinv))) ## compute the marginal likelihood to monitor convergence mlike <- -1/2 * (log.det.Sigma.inv + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp)) cat ("Marg. 
Likelihood =", formatC (mlike), "\tnRV=", n, "\tvar=", var, "\n") } ## compute zeta zeta <- 1 - diagSigma / thetatmp ## compute logtheta for convergence checking logtheta <- - log(thetavec[nzindex]) ## update thetavec if(i < piter){ thetavec [nzindex] <- wvec [nzindex]^2 / zeta thetavec [thetavec <= 0] <- 0 } else{ thetavec [nzindex] <- (wvec [nzindex]^2/zeta - diagSigma)/zeta thetavec [thetavec <= 0] <- 0 } ## Stop if largest alpha change is too small maxdiff <- max(abs(logtheta[thetavec[which(nzindex)]!=0] + log(thetavec[thetavec!=0]))) if(maxdiff < minmaxdiff) break; ## update variance if (!var.fix) { var <- err / (m - sum (zeta)) } } if(verbosity == 0) mlike(ret) <- drop(-1/2 * (-2*sum(log(diag(Rinv))) + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp))) nvar(ret) <- var error(ret) <- sqrt(err/m) if(fit) fitted(ret) <- Kr %*% wvec [nzindex] } if(type(ret)=="classification") { stop("classification with the relevance vector machine not implemented yet") } kcall(ret) <- match.call() kernelf(ret) <- kernel alpha(ret) <- wvec[nzindex] tol(ret) <- tol xmatrix(ret) <- x ymatrix(ret) <- y RVindex(ret) <- which(nzindex) nRV(ret) <- length(RVindex(ret)) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross!=0) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- rvm(x[cind,],factor (lev(ret)[y[cind]], levels = lev(ret)),type=type(ret),kernel=kernel,alpha = alpha,var = var, var.fix=var.fix, tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- rvm(x[cind,],y[cind],type=type(ret),kernel=kernel,tol=tol,alpha = alpha, var = var, var.fix=var.fix, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("rvm",signature(x="kernelMatrix"), function (x, y, type = "regression", alpha = ncol(as.matrix(x)), var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... 
,subset ) { ## subsetting and na-handling for matrices ret <- new("rvm") if (!missing(subset)) x <- as.kernelMatrix(x[subset,subset]) if (is.null(y)) stop("response y missing") ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- "regression" # in case of classification: transform factors into integers if (is.factor(y)) { stop("Claasification is not implemented, you can use ksvm(), gausspr() or lssvm()") } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(length(alpha) == m) thetavec <- 1/alpha else if (length(alpha) == 1) thetavec <- rep(1/alpha, m) else stop("length of initial alpha vector is wrong (has to be one or equal with number of train data") wvec <- rep(1, m) piter <- iterations*0.4 if (type(ret) == "regression") { Kml <- crossprod(x, y) for (i in 1:iterations) { nzindex <- thetavec > tol thetavec [!nzindex] <- wvec [!nzindex] <- 0 Kr <- x [ ,nzindex, drop = FALSE] thetatmp <- thetavec[nzindex] n <- sum (nzindex) Rinv <- backsolve(chol(crossprod(Kr)/var + diag(1/thetatmp)),diag(1,n)) ## compute the new wvec coefficients wvec [nzindex] <- (Rinv %*% (crossprod(Rinv, Kml [nzindex])))/var diagSigma <- rowSums(Rinv^2) ## error err <- sum ((y - Kr %*% wvec [nzindex])^2) if(var < 2e-9) { warning("Model might be overfitted") break } ## log some information if (verbosity > 0) { log.det.Sigma.inv <- - 2 * sum (log (diag (Rinv))) ## compute the marginal likelihood to monitor convergence mlike <- -1/2 * (log.det.Sigma.inv + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp)) cat ("Marg. Likelihood =", formatC (mlike), "\tnRV=", n, "\tvar=", var, "\n") } ## compute zeta zeta <- 1 - diagSigma / thetatmp ## compute logtheta for convergence checking logtheta <- - log(thetavec[nzindex]) ## update thetavec if(i < piter){ thetavec [nzindex] <- wvec [nzindex]^2 / zeta thetavec [thetavec <= 0] <- 0 } else{ thetavec [nzindex] <- (wvec [nzindex]^2/zeta - diagSigma)/zeta thetavec [thetavec <= 0] <- 0 } ## Stop if largest alpha change is too small maxdiff <- max(abs(logtheta[thetavec[which(nzindex)]!=0] + log(thetavec[thetavec!=0]))) if(maxdiff < minmaxdiff) break; ## update variance if (!var.fix) { var <- err / (m - sum (zeta)) } } if(verbosity == 0) mlike(ret) <- drop(-1/2 * (-2*sum(log(diag(Rinv))) + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp))) nvar(ret) <- var error(ret) <- sqrt(err/m) if(fit) fitted(ret) <- Kr %*% wvec [nzindex] } if(type(ret)=="classification") { stop("classification with the relevance vector machine not implemented yet") } kcall(ret) <- match.call() kernelf(ret) <- " Kernel Matrix used. 
\n" coef(ret) <- alpha(ret) <- wvec[nzindex] tol(ret) <- tol xmatrix(ret) <- x ymatrix(ret) <- y RVindex(ret) <- which(nzindex) nRV(ret) <- length(RVindex(ret)) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross!=0) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- rvm(as.kernelMatrix(x[cind,cind]),factor (lev(ret)[y[cind]], levels = lev(ret)),type=type(ret),alpha = alpha,var = var, var.fix=var.fix, tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind][,RVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- rvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret),tol=tol,alpha = alpha, var = var, var.fix=var.fix, cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind][,RVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m)/cross + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("predict", signature(object = "rvm"), function (object, newdata, ...) { if (missing(newdata)) return(fitted(object)) if(!is(newdata,"kernelMatrix") && !is(newdata,"list")){ ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") p<-0 } if(type(object) == "regression") { if(is(newdata,"kernelMatrix")) ret <- newdata %*% coef(object) - b(object) if(is(newdata,"list")) ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[RVindex(object)],alpha(object)) else ret <- kernelMult(kernelf(object),newdata,as.matrix(xmatrix(object)[RVindex(object),,drop=FALSE]),alpha(object)) } ret }) setMethod("show","rvm", function(object){ cat("Relevance Vector Machine object of class \"rvm\"","\n") cat("Problem type: regression","\n","\n") show(kernelf(object)) cat(paste("\nNumber of Relevance Vectors :", nRV(object),"\n")) cat("Variance : ",round(nvar(object),9)) cat("\n") if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),9),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),9),"\n") ##train error & loss }) kernlab/vignettes/0000755000176000001440000000000012560430717013735 5ustar ripleyuserskernlab/vignettes/jss.bib0000644000176000001440000003410612055335060015210 0ustar ripleyusers@Article{kernlab:Karatzoglou+Smola+Hornik:2004, author = {Alexandros Karatzoglou and Alex Smola and Kurt Hornik and Achim Zeileis}, title = {kernlab -- An \proglang{S4} Package for Kernel Methods in \proglang{R}}, year = {2004}, journal = {Journal of Statistical Software}, volume = {11}, number = {9}, pages = {1--20}, url = {http://www.jstatsoft.org/v11/i09/} } @Book{kernlab:Schoelkopf+Smola:2002, author = {Bernhard Sch\"olkopf and Alex Smola}, title = {Learning with Kernels}, publisher = {MIT Press}, year = 2002, } @Book{kernlab:Chambers:1998, Author = {John M. 
Chambers}, title = {Programming with Data}, Publisher = {Springer, New York}, Year = 1998, note = {ISBN 0-387-98503-4}, }
@Book{kernlab:Hastie:2001, author = {T. Hastie and R. Tibshirani and J. H. Friedman}, title = {The Elements of Statistical Learning}, publisher = {Springer}, Year = 2001, }
@Article{kernlab:Vanderbei:1999, author = {Robert Vanderbei}, title = {{LOQO}: An Interior Point Code for Quadratic Programming}, journal = {Optimization Methods and Software}, year = 1999, volume = 12, pages = {251--484}, url = {http://www.sor.princeton.edu/~rvdb/ps/loqo6.pdf}, }
@Misc{kernlab:Leisch+Dimitriadou, author = {Friedrich Leisch and Evgenia Dimitriadou}, title = {\pkg{mlbench}---{A} Collection for Artificial and Real-world Machine Learning Benchmarking Problems}, howpublished = {\textsf{R} package, Version 0.5-6}, note = {Available from \url{http://CRAN.R-project.org}}, year = 2001, month = 12, }
@Misc{kernlab:Roever:2004, author = {Christian Roever and Nils Raabe and Karsten Luebke and Uwe Ligges}, title = {\pkg{klaR} -- Classification and Visualization}, howpublished = {\textsf{R} package, Version 0.3-3}, note = {Available from \url{http://cran.R-project.org}}, year = 2004, month = 7, }
@Article{kernlab:Hsu+Lin:2002, author = {C.-W. Hsu and Chih-Jen Lin}, title = {A Comparison of Methods for Multi-class Support Vector Machines}, journal = {IEEE Transactions on Neural Networks}, year = 2002, volume = 13, pages = {415--425}, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz}, }
@Misc{kernlab:Chang+Lin:2001, author = {Chih-Chung Chang and Chih-Jen Lin}, title = {{LIBSVM}: A Library for Support Vector Machines}, note = {Software available at \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm}}, year = 2001, }
@Article{kernlab:Platt:2000, Author = {J. C. Platt}, Title = {Probabilistic Outputs for Support Vector Machines and Comparison to Regularized Likelihood Methods}, Journal = {Advances in Large Margin Classifiers, A. Smola, P. Bartlett, B. Sch\"olkopf and D. Schuurmans, Eds.}, Year = 2000, publisher = {Cambridge, MA: MIT Press}, url = {http://citeseer.nj.nec.com/platt99probabilistic.html}, }
@Article{kernlab:Platt:1998, Author = {J. C. Platt}, Title = {Probabilistic Outputs for Support Vector Machines and Comparison to Regularized Likelihood Methods}, Journal = {B. Sch\"olkopf, C. J. C. Burges, A. J. Smola, editors, Advances in Kernel Methods --- Support Vector Learning}, Year = 1998, publisher = {Cambridge, MA: MIT Press}, url = {http://research.microsoft.com/~jplatt/abstracts/smo.html}, }
@Article{kernlab:Keerthi:2002, Author = {S. S. Keerthi and E. G. Gilbert}, Title = {Convergence of a Generalized {SMO} Algorithm for {SVM} Classifier Design}, Journal = {Machine Learning}, pages = {351--360}, Year = 2002, volume = 46, url = {http://guppy.mpe.nus.edu.sg/~mpessk/svm/conv_ml.ps.gz}, }
@Article{kernlab:Olvi:2000, Author = {Alex J. Smola and Olvi L. Mangasarian and Bernhard Sch\"olkopf}, Title = {Sparse Kernel Feature Analysis}, Journal = {24th Annual Conference of Gesellschaft f\"ur Klassifikation}, publisher = {University of Passau}, Year = 2000, url = {ftp://ftp.cs.wisc.edu/pub/dmi/tech-reports/99-04.ps}, }
@Unpublished{kernlab:Lin:2001, Author = {H.-T. Lin and Chih-Jen Lin and R. C. Weng}, Title = {A Note on {Platt's} Probabilistic Outputs for Support Vector Machines}, Year = 2001, note = {Available at \url{http://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.ps}}, }
@Unpublished{kernlab:Weng:2004, Author = {C.-J. Lin and R. C.
Weng}, Title = {Probabilistic Predictions for Support Vector Regression}, Year = 2004, note = {Available at \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svrprob.pdf}}, } @Article{kernlab:Crammer:2000, Author = {K. Crammer and Y. Singer}, Title = {On the Learnability and Design of Output Codes for Multiclass Prolems}, Year = 2000, Journal = {Computational Learning Theory}, Pages = {35--46}, url = {http://www.cs.huji.ac.il/~kobics/publications/mlj01.ps.gz}, } @Article{kernlab:joachim:1999, Author = {Thorsten Joachims}, Title = {Making Large-scale {SVM} Learning Practical}, Journal = {In Advances in Kernel Methods --- Support Vector Learning}, Chapter = 11, Year = 1999, publisher = {MIT Press}, url = {http://www-ai.cs.uni-dortmund.de/DOKUMENTE/joachims_99a.ps.gz}, } @Article{kernlab:Meyer:2001, author = {David Meyer}, title = {Support Vector Machines}, journal = {R News}, year = 2001, volume = 1, number = 3, pages = {23--26}, month = {September}, url = {http://CRAN.R-project.org/doc/Rnews/}, note = {\url{http://CRAN.R-project.org/doc/Rnews/}} } @ARTICLE{kernlab:meyer+leisch+hornik:2003, AUTHOR = {David Meyer and Friedrich Leisch and Kurt Hornik}, TITLE = {The Support Vector Machine under Test}, JOURNAL = {Neurocomputing}, YEAR = 2003, MONTH = {September}, PAGES = {169--186}, VOLUME = 55, } @Book{kernlab:Vapnik:1998, author = {Vladimir Vapnik}, Title = {Statistical Learning Theory}, Year = 1998, publisher = {Wiley, New York}, } @Book{kernlab:Vapnik2:1995, author = {Vladimir Vapnik}, Title = {The Nature of Statistical Learning Theory}, Year = 1995, publisher = {Springer, NY}, } @Article{kernlab:Wu:2003, Author = {Ting-Fan Wu and Chih-Jen Lin and Ruby C. Weng}, Title = {Probability Estimates for Multi-class Classification by Pairwise Coupling}, Year = 2003, Journal = {Advances in Neural Information Processing}, Publisher = {MIT Press Cambridge Mass.}, Volume = 16, url = {http://books.nips.cc/papers/files/nips16/NIPS2003_0538.pdf}, } @Article{kernlab:Williams:1995, Author = {Christopher K. I. Williams and Carl Edward Rasmussen}, Title = {Gaussian Processes for Regression}, Year = 1995, Journal = {Advances in Neural Information Processing}, Publisher = {MIT Press Cambridge Mass.}, Volume = 8, url = {http://books.nips.cc/papers/files/nips08/0514.pdf}, } @Article{kernlab:Schoelkopf:1998, Author = {B. Sch\"olkopf and A. Smola and K. R. M\"uller}, Title = {Nonlinear Component Analysis as a Kernel Eigenvalue Problem}, Journal = {Neural Computation}, Volume = 10, Pages = {1299--1319}, Year = 1998, url = {http://mlg.anu.edu.au/~smola/papers/SchSmoMul98.pdf}, } @Article{kernlab:Tipping:2001, Author = {M. E. Tipping}, Title = {Sparse Bayesian Learning and the Relevance Vector Machine}, Journal = {Journal of Machine Learning Research}, Volume = 1, Year = 2001, Pages = {211--244}, url = {http://www.jmlr.org/papers/volume1/tipping01a/tipping01a.pdf}, } @Article{kernlab:Zhou:2003, Author = {D. Zhou and J. Weston and A. Gretton and O. Bousquet and B. Sch\"olkopf}, Title = {Ranking on Data Manifolds}, Journal = {Advances in Neural Information Processing Systems}, Volume = 16, Year = 2003, Publisher = {MIT Press Cambridge Mass.}, url = {http://www.kyb.mpg.de/publications/pdfs/pdf2334.pdf}, } @Article{kernlab:Andrew:2001, Author = {Andrew Y. Ng and Michael I. 
Jordan and Yair Weiss}, Title = {On Spectral Clustering: Analysis and an Algorithm}, Journal = {Advances in Neural Information Processing Systems}, Volume = 14, Publisher = {MIT Press Cambridge Mass.}, url = {http://www.nips.cc/NIPS2001/papers/psgz/AA35.ps.gz}, } @Article{kernlab:Caputo:2002, Author = {B. Caputo and K. Sim and F. Furesjo and A. Smola}, Title = {Appearance-based Object Recognition using {SVMs}: Which Kernel Should {I} Use?}, Journal = {Proc of NIPS workshop on Statistical methods for computational experiments in visual processing and computer vision, Whistler, 2002}, Year = 2002, } @Article{kernlab:Putten:2000, Author = {Peter van der Putten and Michel de Ruiter and Maarten van Someren}, Title = {CoIL Challenge 2000 Tasks and Results: Predicting and Explaining Caravan Policy Ownership}, Journal = {Coil Challenge 2000}, Year = 2000, url = {http://www.liacs.nl/~putten/library/cc2000/}, } @Article{kernlab:Hsu:2002, Author = {C.-W. Hsu and Chih-Jen Lin}, Title = {A Simple Decomposition Method for Support Vector Machines}, Journal = {Machine Learning}, Year = 2002, Pages = {291--314}, volume = 46, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/decomp.ps.gz}, } @Article{kernlab:Knerr:1990, Author = {S. Knerr and L. Personnaz and G. Dreyfus}, Title = {Single-layer Learning Revisited: A Stepwise Procedure for Building and Training a Neural Network.}, Journal = {J. Fogelman, editor, Neurocomputing: Algorithms, Architectures and Applications}, Publisher = {Springer-Verlag}, Year = 1990, } @Article{kernlab:Kressel:1999, Author = {U. Kre{\ss}el}, Title = {Pairwise Classification and Support Vector Machines}, Year = 1999, Journal = {B. Sch\"olkopf, C. J. C. Burges, A. J. Smola, editors, Advances in Kernel Methods --- Support Vector Learning}, Pages = {255--268}, Publisher = {Cambridge, MA, MIT Press}, } @Article{kernlab:Hsu2:2002, Title = {A Comparison of Methods for Multi-class Support Vector Machines}, Author = {C.-W. Hsu and Chih-Jen Lin}, Journal = {IEEE Transactions on Neural Networks}, Volume = 13, Year = 2002, Pages = {1045--1052}, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz}, } @Article{kernlab:Tax:1999, Title = {Support Vector Domain Description}, Author = {David M. J. Tax and Robert P. W. Duin}, Journal = {Pattern Recognition Letters}, Volume = 20, Pages = {1191--1199}, Year = 1999, Publisher = {Elsevier}, url = {http://www.ph.tn.tudelft.nl/People/bob/papers/prl_99_svdd.pdf}, } @Article{kernlab:Williamson:1999, Title = {Estimating the Support of a High-Dimensonal Distribution}, Author = {B. Sch\"olkopf and J. Platt and J. Shawe-Taylor and A. J. Smola and R. C. Williamson}, Journal = {Microsoft Research, Redmond, WA}, Volume = {TR 87}, Year = 1999, url = {http://research.microsoft.com/research/pubs/view.aspx?msr_tr_id=MSR-TR-99-87}, } @Article{kernlab:Smola1:2000, Title = {New Support Vector Algorithms}, Author = {B. Sch\"olkopf and A. J. Smola and R. C. Williamson and P. L. Bartlett}, Journal = {Neural Computation}, Volume = 12, Year = 2000, Pages = {1207--1245}, url = {http://caliban.ingentaselect.com/vl=3338649/cl=47/nw=1/rpsv/cgi-bin/cgi?body=linker&reqidx=0899-7667(2000)12:5L.1207}, } @Article{kernlab:Wright:1999, Title = {Modified {Cholesky} Factorizations in Interior-point Algorithms for Linear Programming}, Author = {S. 
Wright}, Journal = {Journal in Optimization}, Volume = 9, publisher = {SIAM}, Year = 1999, Pages = {1159--1191}, ur = {http://www-unix.mcs.anl.gov/~wright/papers/P600.pdf}, } @Article{kernlab:more:1999, Title = {Newton's Method for Large-scale Bound Constrained Problems}, Author = {Chih-Jen Lin and J. J. More}, Journal = {SIAM Journal on Optimization}, volume = 9, pages = {1100--1127}, Year = 1999, } @Article{kernlab:Ng:2001, Title = {On Spectral Clustering: Analysis and an Algorithm}, Author = {Andrew Y. Ng and Michael I. Jordan and Yair Weiss}, Journal = {Neural Information Processing Symposium 2001}, Year = 2001, url = {http://www.nips.cc/NIPS2001/papers/psgz/AA35.ps.gz} } @Article{kernlab:kuss:2003, Title = {The Geometry of Kernel Canonical Correlation Analysis}, Author = {Malte Kuss and Thore Graepel}, Journal = {MPI-Technical Reports}, url = {http://www.kyb.mpg.de/publication.html?publ=2233}, Year = 2003, } %% Mathias Seeger gp pub. @Article{kernlab:Kivinen:2004, Title = {Online Learning with Kernels}, Author = {Jyrki Kivinen and Alexander Smola and Robert Williamson}, Journal ={IEEE Transactions on Signal Processing}, volume = 52, Year = 2004, url = {http://mlg.anu.edu.au/~smola/papers/KivSmoWil03.pdf}, } kernlab/vignettes/A.cls0000644000176000001440000001273612055335060014623 0ustar ripleyusers\def\fileversion{1.0} \def\filename{A} \def\filedate{2004/10/08} %% %% \NeedsTeXFormat{LaTeX2e} \ProvidesClass{A}[\filedate\space\fileversion\space A class ] %% options \LoadClass[10pt,a4paper,twoside]{article} \newif\if@notitle \@notitlefalse \DeclareOption{notitle}{\@notitletrue} \ProcessOptions %% required packages \RequirePackage{graphicx,a4wide,color,hyperref,ae,fancyvrb,thumbpdf} \RequirePackage[T1]{fontenc} \usepackage[authoryear,round,longnamesfirst]{natbib} \bibpunct{(}{)}{;}{a}{}{,} \bibliographystyle{jss} %% paragraphs \setlength{\parskip}{0.7ex plus0.1ex minus0.1ex} \setlength{\parindent}{0em} %% commands \let\code=\texttt \let\proglang=\textsf \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \newcommand{\E}{\mathsf{E}} \newcommand{\VAR}{\mathsf{VAR}} \newcommand{\COV}{\mathsf{COV}} \newcommand{\Prob}{\mathsf{P}} %% for all publications \newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}} \newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}} \newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}} \newcommand{\Keywords}[1]{\def\@Keywords{#1}} \newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}} \newcommand{\Abstract}[1]{\def\@Abstract{#1}} %% defaults \author{Firstname Lastname\\Affiliation} \title{Title} \Abstract{---!!!---an abstract is required---!!!---} \Plainauthor{\@author} \Plaintitle{\@title} \Shorttitle{\@title} \Keywords{---!!!---at least one keyword is required---!!!---} \Plainkeywords{\@Keywords} %% Sweave(-like) %\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl} %\DefineVerbatimEnvironment{Soutput}{Verbatim}{} %\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl} %\newenvironment{Schunk}{}{} \DefineVerbatimEnvironment{Code}{Verbatim}{} \DefineVerbatimEnvironment{CodeInput}{Verbatim}{fontshape=sl} \DefineVerbatimEnvironment{CodeOutput}{Verbatim}{} \newenvironment{CodeChunk}{}{} \setkeys{Gin}{width=0.8\textwidth} %% new \maketitle \def\maketitle{ \begingroup \def\thefootnote{\fnsymbol{footnote}} \def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}} \long\def\@makefntext##1{\parindent 1em\noindent \hbox to1.8em{\hss $\m@th ^{\@thefnmark}$}##1} \@maketitle \@thanks \endgroup 
\setcounter{footnote}{0} \thispagestyle{empty} \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}} \pagestyle{myheadings} \let\maketitle\relax \let\@maketitle\relax \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax } \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize {\centering {\LARGE\bf \@title\par} \def\And{\end{tabular}\hfil\linebreak[0]\hfil \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}% \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author\end{tabular}% \vskip 0.3in minus 0.1in \hrule \begin{abstract} \@Abstract \end{abstract}} \textit{Keywords}:~\@Keywords. \vskip 0.1in minus 0.05in \hrule \vskip 0.2in minus 0.1in }} %% sections, subsections, and subsubsections \newlength{\preXLskip} \newlength{\preLskip} \newlength{\preMskip} \newlength{\preSskip} \newlength{\postMskip} \newlength{\postSskip} \setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex} \setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex} \setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex} \setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex} \setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex} \setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex} \newcommand{\jsssec}[2][default]{\vskip \preXLskip% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \centerline{\textbf{\Large \thesection. #2}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssecnn}[1]{\vskip \preXLskip% \centerline{\textbf{\Large #1}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssubsec}[2][default]{\vskip \preMskip% \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}% \refstepcounter{subsection}% \textbf{\large \thesubsection. #2} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsecnn}[1]{\vskip \preMskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip% \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}% \refstepcounter{subsubsection}% {\large \textit{#2}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip% {\textit{\large #1}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesec}[2][default]{\vskip \preLskip% %% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesecnn}[1]{\vskip \preLskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \renewcommand{\section}{\secdef \jsssec \jsssecnn} \renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn} \renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn} %% colors \definecolor{Red}{rgb}{0.7,0,0} \definecolor{Blue}{rgb}{0,0,0.8} \hypersetup{% hyperindex = {true}, colorlinks = {true}, linktocpage = {true}, plainpages = {false}, linkcolor = {Blue}, citecolor = {Blue}, urlcolor = {Red}, pdfstartview = {Fit}, pdfpagemode = {UseOutlines}, pdfview = {XYZ null null null} } \AtBeginDocument{ \hypersetup{% pdfauthor = {\@Plainauthor}, pdftitle = {\@Plaintitle}, pdfkeywords = {\@Plainkeywords} } } \if@notitle %% \AtBeginDocument{\maketitle} \else \AtBeginDocument{\maketitle} \fi kernlab/vignettes/kernlab.Rnw0000644000176000001440000014230512055335060016042 0ustar ripleyusers\documentclass{A} \usepackage{amsfonts,thumbpdf,alltt} \newenvironment{smallverbatim}{\small\verbatim}{\endverbatim} \newenvironment{smallexample}{\begin{alltt}\small}{\end{alltt}} \SweaveOpts{engine=R,eps=FALSE} 
%\VignetteIndexEntry{kernlab - An S4 Package for Kernel Methods in R}
%\VignetteDepends{kernlab}
%\VignetteKeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R}
%\VignettePackage{kernlab}

<<preliminaries, echo=FALSE>>=
library(kernlab)
options(width = 70)
@

\title{\pkg{kernlab} -- An \proglang{S4} Package for Kernel Methods in \proglang{R}}
\Plaintitle{kernlab - An S4 Package for Kernel Methods in R}

\author{Alexandros Karatzoglou\\Technische Universit\"at Wien
   \And Alex Smola\\Australian National University, NICTA
   \And Kurt Hornik\\Wirtschaftsuniversit\"at Wien }
\Plainauthor{Alexandros Karatzoglou, Alex Smola, Kurt Hornik}

\Abstract{
  \pkg{kernlab} is an extensible package for kernel-based machine
  learning methods in \proglang{R}. It takes advantage of
  \proglang{R}'s new \proglang{S4} object model and provides a
  framework for creating and using kernel-based algorithms. The
  package contains dot product primitives (kernels), implementations
  of support vector machines and the relevance vector machine,
  Gaussian processes, a ranking algorithm, kernel PCA, kernel CCA,
  kernel feature analysis, online kernel methods and a spectral
  clustering algorithm. Moreover it provides a general purpose
  quadratic programming solver, and an incomplete Cholesky
  decomposition method.
}

\Keywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, \proglang{S4}, \proglang{R}}
\Plainkeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R}

\begin{document}

\section{Introduction}

Machine learning is all about extracting structure from data, but it is often difficult to solve problems like classification, regression and clustering in the space in which the underlying observations have been made.

Kernel-based learning methods use an implicit mapping of the input data into a high dimensional feature space defined by a kernel function, i.e., a function returning the inner product $ \langle \Phi(x),\Phi(y) \rangle$ between the images of two data points $x, y$ in the feature space. The learning then takes place in the feature space, provided the learning algorithm can be entirely rewritten so that the data points only appear inside dot products with other points. This is often referred to as the ``kernel trick'' \citep{kernlab:Schoelkopf+Smola:2002}. More precisely, if a projection $\Phi: X \rightarrow H$ is used, the dot product $\langle\Phi(x),\Phi(y)\rangle$ can be represented by a kernel function~$k$
\begin{equation} \label{eq:kernel}
k(x,y)= \langle \Phi(x),\Phi(y) \rangle,
\end{equation}
which is computationally simpler than explicitly projecting $x$ and $y$ into the feature space~$H$.

One interesting property of kernel-based systems is that, once a valid kernel function has been selected, one can practically work in spaces of any dimension without paying any computational cost, since feature mapping is never effectively performed. In fact, one does not even need to know which features are being used.

Another advantage is that one can design and use a kernel for a particular problem that could be applied directly to the data without the need for a feature extraction process. This is particularly important in problems where a lot of structure of the data is lost by the feature extraction process (e.g., text processing). The inherent modularity of kernel-based learning methods allows one to use any valid kernel on a kernel-based algorithm.
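To make the ``kernel trick'' of Equation~\ref{eq:kernel} concrete, the following minimal sketch (using two arbitrary random vectors; the explicit feature map \code{phi} is constructed here purely for illustration) checks that the homogeneous polynomial kernel of degree two returns exactly the dot product of the explicitly mapped points:

\begin{smallexample}
## homogeneous polynomial kernel of degree 2: k(x,y) = <x,y>^2
k <- polydot(degree = 2, scale = 1, offset = 0)
x <- rnorm(3)
y <- rnorm(3)
## illustrative explicit feature map phi(x) = (x_i x_j) over all pairs (i,j)
phi <- function(z) as.vector(outer(z, z))
## both sides of Equation 1 coincide
all.equal(as.numeric(k(x, y)), sum(phi(x) * phi(y)))  ## TRUE (up to floating point tolerance)
\end{smallexample}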
\subsection{Software review}

The most prominent kernel based learning algorithm is without doubt the support vector machine (SVM), so the existence of many support vector machine packages comes as little surprise. Most of the existing SVM software is written in \proglang{C} or \proglang{C++}, e.g.\ the award-winning \pkg{libsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/libsvm/}} \citep{kernlab:Chang+Lin:2001}, \pkg{SVMlight}\footnote{\url{http://svmlight.joachims.org}} \citep{kernlab:joachim:1999}, \pkg{SVMTorch}\footnote{\url{http://www.torch.ch}}, Royal Holloway Support Vector Machines\footnote{\url{http://svm.dcs.rhbnc.ac.uk}}, \pkg{mySVM}\footnote{\url{http://www-ai.cs.uni-dortmund.de/SOFTWARE/MYSVM/index.eng.html}}, and \pkg{M-SVM}\footnote{\url{http://www.loria.fr/~guermeur/}}, with many packages providing interfaces to \proglang{MATLAB} (such as \pkg{libsvm}), and even some native \proglang{MATLAB} toolboxes\footnote{\url{http://www.isis.ecs.soton.ac.uk/resources/svminfo/}}\,\footnote{\url{http://asi.insa-rouen.fr/~arakotom/toolbox/index}}\,\footnote{\url{http://www.cis.tugraz.at/igi/aschwaig/software.html}}.

Putting SVM-specific software aside and considering the abundance of other kernel-based algorithms published nowadays, there is little software available implementing a wider range of kernel methods. Some exceptions are the \pkg{Spider}\footnote{\url{http://www.kyb.tuebingen.mpg.de/bs/people/spider/}} software, which provides a \proglang{MATLAB} interface to various \proglang{C}/\proglang{C++} SVM libraries and \proglang{MATLAB} implementations of various kernel-based algorithms, \pkg{Torch}\footnote{\url{http://www.torch.ch}}, which also includes more traditional machine learning algorithms, and the occasional \proglang{MATLAB} or \proglang{C} program found on a personal web page where an author includes code from a published paper.

\subsection[R software]{\proglang{R} software}

The \proglang{R} package \pkg{e1071} offers an interface to the award-winning \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, a very efficient SVM implementation. \pkg{libsvm} provides a robust and fast SVM implementation and produces state-of-the-art results on most classification and regression problems \citep{kernlab:Meyer+Leisch+Hornik:2003}. The \proglang{R} interface provided in \pkg{e1071} adds all standard \proglang{R} functionality like object orientation and formula interfaces to \pkg{libsvm}. Another SVM-related \proglang{R} package which was recently made available is \pkg{klaR} \citep{kernlab:Roever:2004}, which includes an interface to \pkg{SVMlight}, a popular SVM implementation, along with other classification tools like Regularized Discriminant Analysis.

However, most of the \pkg{libsvm} and \pkg{klaR} SVM code is in \proglang{C++}. Therefore, if one would like to extend or enhance the code with e.g.\ new kernels or different optimizers, one would have to modify the core \proglang{C++} code.

\section[kernlab]{\pkg{kernlab}}

\pkg{kernlab} aims to provide the \proglang{R} user with basic kernel functionality (e.g., computing a kernel matrix using a particular kernel), along with some utility functions commonly used in kernel-based methods, like a quadratic programming solver, and modern kernel-based algorithms based on the functionality that the package provides.
Taking advantage of the inherent modularity of kernel-based methods, \pkg{kernlab} aims to allow the user to switch between kernels on an existing algorithm and even create and use own kernel functions for the kernel methods provided in the package.

\subsection[S4 objects]{\proglang{S4} objects}

\pkg{kernlab} uses \proglang{R}'s new object model described in ``Programming with Data'' \citep{kernlab:Chambers:1998} which is known as the \proglang{S4} class system and is implemented in the \pkg{methods} package.

In contrast with the older \proglang{S3} model for objects in \proglang{R}, classes, slots, and methods relationships must be declared explicitly when using the \proglang{S4} system. The number and types of slots in an instance of a class have to be established at the time the class is defined. The objects from the class are validated against this definition and have to comply with it at any time. \proglang{S4} also requires formal declarations of methods, unlike the informal system of using function names to identify a certain method in \proglang{S3}.

An \proglang{S4} method is declared by a call to \code{setMethod} along with the name and a ``signature'' of the arguments. The signature is used to identify the classes of one or more arguments of the method. Generic functions can be declared using the \code{setGeneric} function. Although such formal declarations require package authors to be more disciplined than when using the informal \proglang{S3} classes, they provide assurance that each object in a class has the required slots and that the names and classes of data in the slots are consistent.

An example of a class used in \pkg{kernlab} is shown below. Typically, in a return object we want to include information on the result of the method along with additional information and parameters. Usually \pkg{kernlab}'s classes include slots for the kernel function used and the results and additional useful information.

\begin{smallexample}
setClass("specc",
         representation("vector",      # the vector containing the cluster
          centers="matrix",            # the cluster centers
          size="vector",               # size of each cluster
          kernelf="function",          # kernel function used
          withinss = "vector"),        # within cluster sum of squares
         prototype = structure(.Data = vector(),
                               centers = matrix(),
                               size = matrix(),
                               kernelf = ls,
                               withinss = vector()))
\end{smallexample}

Accessor and assignment functions are defined and used to access the content of each slot, which can also be accessed with the \verb|@| operator.

\subsection{Namespace}

Namespaces were introduced in \proglang{R} 1.7.0 and provide a means for packages to control the way global variables and methods are being made available. Due to the number of assignment and accessor functions involved, a namespace is used to control the methods which are being made visible outside the package. Since \proglang{S4} methods are being used, the \pkg{kernlab} namespace also imports methods and variables from the \pkg{methods} package.

\subsection{Data}

The \pkg{kernlab} package also includes data sets which will be used to illustrate the methods included in the package. The \code{spam} data set \citep{kernlab:Hastie:2001}, collected at Hewlett-Packard Labs, contains data on 2788 and 1813 e-mails classified as non-spam and spam, respectively. The 57 variables of each data vector indicate the frequency of certain words and characters in the e-mail.
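As a quick sketch of how the data can be inspected (the 58th column holds the class label, as also used in the examples later in this paper):

\begin{verbatim}
## load and inspect the spam data set
data(spam)
dim(spam)          ## 4601 observations, 58 variables
table(spam[, 58])  ## class distribution: non-spam vs. spam
\end{verbatim}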
Another data set included in \pkg{kernlab}, the \code{income} data set \citep{kernlab:Hastie:2001}, is taken from a marketing survey in the San Francisco Bay Area concerning the income of shopping mall customers. It consists of 8993 observations on 14 demographic attributes (nominal and ordinal variables), including the income.

The \code{ticdata} data set \citep{kernlab:Putten:2000} was used in the 2000 CoIL Challenge and contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why?

The \code{promotergene} data set contains E.~coli promoter gene sequences (DNA) with 106 observations and 58 variables, available at the UCI Machine Learning repository. Promoters have a region where a protein (RNA polymerase) must make contact, and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters.

The \code{spirals} data set was created by the \code{mlbench.spirals} function in the \pkg{mlbench} package \citep{kernlab:Leisch+Dimitriadou}. This two-dimensional data set with 300 data points consists of two spirals where Gaussian noise is added to each data point.

\subsection{Kernels}

A kernel function~$k$ calculates the inner product of two vectors $x$, $x'$ under a given feature mapping $\Phi: X \rightarrow H$. The notion of a kernel is obviously central in the making of any kernel-based algorithm and consequently also in any software package containing kernel-based methods.

Kernels in \pkg{kernlab} are \proglang{S4} objects of class \code{kernel} extending the \code{function} class with one additional slot containing a list with the kernel hyper-parameters. Package \pkg{kernlab} includes 7 different kernel classes which all contain the class \code{kernel} and are used to implement the existing kernels. These classes are used in the function dispatch mechanism of the kernel utility functions described below. Existing kernel functions are initialized by ``creator'' functions. All kernel functions take two feature vectors as parameters and return the scalar dot product of the vectors. An example of the functionality of a kernel in \pkg{kernlab}:

<<rbf1>>=
## create a RBF kernel function with sigma hyper-parameter 0.05
rbf <- rbfdot(sigma = 0.05)
rbf
## create two random feature vectors
x <- rnorm(10)
y <- rnorm(10)
## compute dot product between x,y
rbf(x, y)
@

The package includes implementations of the following kernels:

\begin{itemize}
\item the linear \code{vanilladot} kernel implements the simplest of all kernel functions
\begin{equation}
k(x,x') = \langle x, x' \rangle
\end{equation}
which is useful especially when dealing with large sparse data vectors~$x$ as is usually the case in text categorization.

\item the Gaussian radial basis function \code{rbfdot}
\begin{equation}
k(x,x') = \exp(-\sigma \|x - x'\|^2)
\end{equation}
which is a general purpose kernel and is typically used when no further prior knowledge is available about the data.

\item the polynomial kernel \code{polydot}
\begin{equation}
k(x, x') = \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)^\mathrm{degree}
\end{equation}
which is used in classification of images.
\item the hyperbolic tangent kernel \code{tanhdot}
\begin{equation}
k(x, x') = \tanh \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)
\end{equation}
which is mainly used as a proxy for neural networks.

\item the Bessel function of the first kind kernel \code{besseldot}
\begin{equation}
k(x, x') = \frac{\mathrm{Bessel}_{(\nu+1)}^n(\sigma \|x - x'\|)}{(\|x-x'\|)^{-n(\nu+1)}}
\end{equation}
which is a general purpose kernel, typically used when no further prior knowledge is available, and is mainly popular in the Gaussian process community.

\item the Laplace radial basis kernel \code{laplacedot}
\begin{equation}
k(x, x') = \exp(-\sigma \|x - x'\|)
\end{equation}
which is a general purpose kernel and is typically used when no further prior knowledge is available.

\item the ANOVA radial basis kernel \code{anovadot} performs well in multidimensional regression problems
\begin{equation}
k(x, x') = \left(\sum_{k=1}^{n}\exp(-\sigma(x^k-{x'}^k)^2)\right)^{d}
\end{equation}
where $x^k$ is the $k$th component of $x$.
\end{itemize}

\subsection{Kernel utility methods}

The package also includes methods for computing commonly used kernel expressions (e.g., the Gram matrix). These methods are written in such a way that they take functions (i.e., kernels) and matrices (i.e., vectors of patterns) as arguments. These can be either the kernel functions already included in \pkg{kernlab} or any other function implementing a valid dot product (taking two vector arguments and returning a scalar). In case one of the already implemented kernels is used, the function calls a vectorized implementation of the corresponding function. Moreover, in the case of symmetric matrices (e.g., the dot product matrix of a Support Vector Machine) they only require one argument rather than having to pass the same matrix twice (for rows and columns).

The computations for the kernels already available in the package are vectorized whenever possible, which guarantees good performance and acceptable memory requirements. Users can define their own kernel by creating a function which takes two vectors as arguments (the data points) and returns a scalar (the dot product). This function can then be passed as an argument to the kernel utility methods. For a user defined kernel the dispatch mechanism calls a generic method implementation which calculates the expression by passing the kernel function through a pair of \code{for} loops.

The kernel methods included are:

\begin{description}
\item[\code{kernelMatrix}] This is the most commonly used function. It computes $k(x, x')$, i.e., it computes the matrix $K$ where $K_{ij} = k(x_i, x_j)$ and each data point $x_i$ is a \emph{row} vector. In particular,
\begin{verbatim}
K <- kernelMatrix(kernel, x)
\end{verbatim}
computes the matrix $K_{ij} = k(x_i, x_j)$ where the $x_i$ are the rows of $x$ and
\begin{verbatim}
K <- kernelMatrix(kernel, x1, x2)
\end{verbatim}
computes the matrix $K_{ij} = k(x1_i, x2_j)$.

\item[\code{kernelFast}] This method behaves differently from \code{kernelMatrix} for \code{rbfdot}, \code{besseldot}, and the \code{laplacedot} kernel, which are all RBF kernels. It is identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input. It is mainly used in kernel algorithms, where columns of the kernel matrix are computed per invocation.
In these cases, evaluating the norm of each entry over and over again, as would be done on a \code{kernelMatrix} invocation on an RBF kernel, would cause significant computational overhead. Its invocation is via
\begin{verbatim}
K <- kernelFast(kernel, x1, x2, a)
\end{verbatim}
Here $a$ is a vector containing the squared norms of $x1$.

\item[\code{kernelMult}] is a convenient way of computing kernel expansions. It returns the vector $f = (f(x_1), \dots, f(x_m))$ where
\begin{equation}
f(x_i) = \sum_{j=1}^{m} k(x_i, x_j) \alpha_j, \mbox{~hence~} f = K \alpha.
\end{equation}
The need for such a function arises from the fact that $K$ may sometimes be larger than the memory available. Therefore, it is convenient to compute $K$ only in stripes and discard each stripe after the corresponding part of $K \alpha$ has been computed. The parameter \code{blocksize} determines the number of rows in the stripes. In particular,
\begin{verbatim}
f <- kernelMult(kernel, x, alpha)
\end{verbatim}
computes $f_i = \sum_{j=1}^m k(x_i, x_j) \alpha_j$ and
\begin{verbatim}
f <- kernelMult(kernel, x1, x2, alpha)
\end{verbatim}
computes $f_i = \sum_{j=1}^m k(x1_i, x2_j) \alpha_j$.

\item[\code{kernelPol}] is a method very similar to \code{kernelMatrix} with the only difference that rather than computing $K_{ij} = k(x_i, x_j)$ it computes $K_{ij} = y_i y_j k(x_i, x_j)$. This means that
\begin{verbatim}
K <- kernelPol(kernel, x, y)
\end{verbatim}
computes the matrix $K_{ij} = y_i y_j k(x_i, x_j)$ where the $x_i$ are the rows of $x$ and $y_i$ are elements of the vector~$y$. Moreover,
\begin{verbatim}
K <- kernelPol(kernel, x1, x2, y1, y2)
\end{verbatim}
computes the matrix $K_{ij} = y1_i y2_j k(x1_i, x2_j)$. Both \code{x1} and \code{x2} may be matrices and \code{y1} and \code{y2} vectors.
\end{description}

An example using these functions:

<<kernelMatrix>>=
## create a polynomial kernel function with degree 2
poly <- polydot(degree=2)
## create artificial data set
x <- matrix(rnorm(60), 6, 10)
y <- matrix(rnorm(40), 4, 10)
## compute kernel matrix
kx <- kernelMatrix(poly, x)
kxy <- kernelMatrix(poly, x, y)
@

\section{Kernel methods}

Providing a solid base for creating kernel-based methods is part of what we are trying to achieve with this package, the other part being to provide a wider range of kernel-based methods in \proglang{R}. In the rest of the paper we present the kernel-based methods available in \pkg{kernlab}. All the methods in \pkg{kernlab} can be used with any of the kernels included in the package as well as with any valid user-defined kernel. User defined kernel functions can be passed to existing kernel-methods in the \code{kernel} argument.

\subsection{Support vector machine}

Support vector machines \citep{kernlab:Vapnik:1998} have gained prominence in the field of machine learning and pattern classification and regression. The solutions to classification and regression problems sought by kernel-based algorithms such as the SVM are linear functions in the feature space:
\begin{equation}
f(x) = w^\top \Phi(x)
\end{equation}
for some weight vector $w \in F$. The kernel trick can be exploited in this setting whenever the weight vector~$w$ can be expressed as a linear combination of the training points, $w = \sum_{i=1}^{n} \alpha_i \Phi(x_i)$, implying that $f$ can be written as
\begin{equation}
f(x) = \sum_{i=1}^{n}\alpha_i k(x_i, x)
\end{equation}

A very important issue that arises is that of choosing a kernel~$k$ for a given learning task.
Intuitively, we wish to choose a kernel that induces the ``right'' metric in the space. Support Vector Machines choose a function $f$ that is linear in the feature space by optimizing some criterion over the sample. In the case of the 2-norm Soft Margin classification the optimization problem takes the form:
\begin{eqnarray}
\nonumber
\mathrm{minimize} && t(w,\xi) = \frac{1}{2}{\|w\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\
\mbox{subject to~} && y_i ( \langle x_i , w \rangle +b ) \geq 1- \xi_i \qquad (i=1,\dots,m)\\
\nonumber
&& \xi_i \ge 0 \qquad (i=1,\dots, m)
\end{eqnarray}
Based on similar methodology, SVMs deal with the problem of novelty detection (or one class classification) and regression.

\pkg{kernlab}'s implementation of support vector machines, \code{ksvm}, is based on the optimizers found in \pkg{bsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/bsvm}} \citep{kernlab:Hsu:2002} and \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, which includes a very efficient version of Sequential Minimal Optimization (SMO). SMO decomposes the SVM Quadratic Problem (QP) without using any numerical QP optimization steps. Instead, it chooses to solve the smallest possible optimization problem involving two elements of $\alpha_i$ because they must obey one linear equality constraint. At every step, SMO chooses two $\alpha_i$ to jointly optimize and finds the optimal values for these $\alpha_i$ analytically, thus avoiding numerical QP optimization, and updates the SVM to reflect the new optimal values.

The SVM implementations available in \code{ksvm} include the C-SVM classification algorithm along with the $\nu$-SVM classification formulation which is equivalent to the former but has a more natural ($\nu$) model parameter taking values in $[0,1]$, proportional to the fraction of support vectors found in the data set and the training error.

For classification problems which include more than two classes (multi-class), a one-against-one or pairwise classification method \citep{kernlab:Knerr:1990, kernlab:Kressel:1999} is used. This method constructs ${k \choose 2}$ classifiers where each one is trained on data from two classes. Prediction is done by voting where each classifier gives a prediction and the class which is predicted more often wins (``Max Wins''). This method has been shown to produce robust results when used with SVMs \citep{kernlab:Hsu2:2002}.

Furthermore, the \code{ksvm} implementation provides the ability to produce class probabilities as output instead of class labels. This is done by an improved implementation \citep{kernlab:Lin:2001} of Platt's posteriori probabilities \citep{kernlab:Platt:2000} where a sigmoid function
\begin{equation}
P(y=1\mid f) = \frac{1}{1+ e^{Af+B}}
\end{equation}
is fitted on the decision values~$f$ of the binary SVM classifiers; $A$ and $B$ are estimated by minimizing the negative log-likelihood function. To extend the class probabilities to the multi-class case, each binary classifier's class probability output is combined by the \code{couple} method, which implements methods for combining class probabilities proposed in \cite{kernlab:Wu:2003}.

In order to create a similar probability output for regression, following \cite{kernlab:Weng:2004}, we suppose that the SVM is trained on data from the model
\begin{equation}
y_i = f(x_i) + \delta_i
\end{equation}
where $f(x_i)$ is the underlying function and $\delta_i$ is independent and identically distributed random noise.
Given test data $x$, the distribution of $y$ given $x$ allows one to draw probabilistic inferences about $y$, e.g., one can construct a predictive interval $\Phi = \Phi(x)$ such that $y \in \Phi$ with a certain probability. If $\hat{f}$ is the estimated (predicted) function of the SVM on new data, then $\eta = \eta(x) = y - \hat{f}(x)$ is the prediction error and $y \in \Phi$ is equivalent to $\eta \in \Phi$. Empirical observation shows that the distribution of the residuals $\eta$ can be modeled both by a Gaussian and a Laplacian distribution with zero mean. In this implementation the Laplacian with zero mean is used:
\begin{equation}
p(z) = \frac{1}{2\sigma}e^{-\frac{|z|}{\sigma}}
\end{equation}
Assuming that the $\eta$ are independent, the scale parameter $\sigma$ is estimated by maximizing the likelihood. The data for the estimation is produced by a three-fold cross-validation. For the Laplace distribution the maximum likelihood estimate is:
\begin{equation}
\sigma = \frac{\sum_{i=1}^m|\eta_i|}{m}
\end{equation}

Another approach for multi-class classification supported by the \code{ksvm} function is the one proposed in \cite{kernlab:Crammer:2000}. This algorithm works by solving a single optimization problem including the data from all classes:
\begin{eqnarray}
\nonumber
\mathrm{minimize} && t(w_n,\xi) = \frac{1}{2}\sum_{n=1}^k{\|w_n\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\
\mbox{subject to~} && \langle x_i , w_{y_i} \rangle - \langle x_i , w_{n} \rangle \geq b_i^n - \xi_i \qquad (i=1,\dots,m) \\
\mbox{where} && b_i^n = 1 - \delta_{y_i,n}
\end{eqnarray}
where the decision function is
\begin{equation}
\mathrm{argmax}_{n=1,\dots,k} \langle x_i , w_{n} \rangle
\end{equation}
This optimization problem is solved by a decomposition method proposed in \cite{kernlab:Hsu:2002} where optimal working sets are found (that is, sets of $\alpha_i$ values which have a high probability of being non-zero). The QP sub-problems are then solved by a modified version of the \pkg{TRON}\footnote{\url{http://www-unix.mcs.anl.gov/~more/tron/}} \citep{kernlab:more:1999} optimization software.

One-class classification or novelty detection \citep{kernlab:Williamson:1999, kernlab:Tax:1999}, where essentially an SVM detects outliers in a data set, is another algorithm supported by \code{ksvm}. SVM novelty detection works by creating a spherical decision boundary around a set of data points by a set of support vectors describing the sphere's boundary. The $\nu$ parameter is used to control the volume of the sphere and consequently the number of outliers found. Again, the value of $\nu$ represents the fraction of outliers found. Furthermore, $\epsilon$-SVM \citep{kernlab:Vapnik2:1995} and $\nu$-SVM \citep{kernlab:Smola1:2000} regression are also available.

The problem of model selection is partially addressed by an empirical observation for the popular Gaussian RBF kernel \citep{kernlab:Caputo:2002}, where the optimal values of the hyper-parameter sigma are shown to lie between the 0.1 and 0.9 quantile of the $\|x - x'\|$ statistics. The \code{sigest} function uses a sample of the training set to estimate the quantiles and returns a vector containing the values of the quantiles. Pretty much any value within this interval leads to good performance. A brief illustration of \code{sigest} and an example for the \code{ksvm} function are shown below.
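A quick sketch of \code{sigest} on the \code{promotergene} data (the \code{kpar = "automatic"} argument to \code{ksvm} in the example that follows relies on this estimation internally):

\begin{verbatim}
## estimate the 0.1, 0.5 and 0.9 quantiles of the sigma statistic
data(promotergene)
srange <- sigest(Class ~ ., data = promotergene)
srange
\end{verbatim}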
<<ksvm>>=
## simple example using the promotergene data set
data(promotergene)

## create test and training set
tindex <- sample(1:dim(promotergene)[1],5)
genetrain <- promotergene[-tindex, ]
genetest <- promotergene[tindex,]

## train a support vector machine
gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE)
gene
predict(gene, genetest)
predict(gene, genetest, type="probabilities")
@

\begin{figure}
\centering
<<fig=TRUE>>=
set.seed(123)
x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2))
y <- matrix(c(rep(1,60),rep(-1,60)))

svp <- ksvm(x,y,type="C-svc")
plot(svp,data=x)
@
\caption{A contour plot of the SVM decision values for a toy binary classification problem using the \code{plot} function.}
\label{fig:ksvm Plot}
\end{figure}

\subsection{Relevance vector machine}

The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM making predictions based on a function of the form
\begin{equation}
y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0
\end{equation}
where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function. It adopts a Bayesian approach to learning, by introducing a prior over the weights $\alpha$
\begin{equation}
p(\alpha \mid \beta) = \prod_{i=1}^m N(\alpha_i \mid 0 , \beta_i^{-1}) \mathrm{Gamma}(\beta_i\mid \beta_\beta , \alpha_\beta)
\end{equation}
governed by a set of hyper-parameters $\beta$, one associated with each weight, whose most probable values are iteratively estimated from the data. Sparsity is achieved because in practice the posterior distribution of many of the weights is sharply peaked around zero. Furthermore, unlike the SVM classifier, the non-zero weights in the RVM are not associated with examples close to the decision boundary, but rather appear to represent ``prototypical'' examples. These examples are termed \emph{relevance vectors}.

\pkg{kernlab} currently has an implementation of the RVM based on a type~II maximum likelihood method which can be used for regression. The function returns an \proglang{S4} object containing the model parameters along with indexes for the relevance vectors and the kernel function and hyper-parameters used.

<<rvm>>=
x <- seq(-20, 20, 0.5)
y <- sin(x)/x + rnorm(81, sd = 0.03)
y[41] <- 1
@
<<rvm2>>=
rvmm <- rvm(x, y,kernel="rbfdot",kpar=list(sigma=0.1))
rvmm
ytest <- predict(rvmm, x)
@

\begin{figure}
\centering
<<fig=TRUE>>=
plot(x, y, cex=0.5)
lines(x, ytest, col = "red")
points(x[RVindex(rvmm)],y[RVindex(rvmm)],pch=21)
@
\caption{Relevance vector regression on data points created by the $sinc(x)$ function, relevance vectors are shown circled.}
\label{fig:RVM sigmoid}
\end{figure}

\subsection{Gaussian processes}

Gaussian processes \citep{kernlab:Williams:1995} are based on the ``prior'' assumption that adjacent observations should convey information about each other. In particular, it is assumed that the observed variables are normal, and that the coupling between them takes place by means of the covariance matrix of a normal distribution. Using the kernel matrix as the covariance matrix is a convenient way of extending Bayesian modeling of linear estimators to nonlinear situations. Furthermore it represents the counterpart of the ``kernel trick'' in methods minimizing the regularized risk.

For regression estimation we assume that rather than observing $t(x_i)$ we observe $y_i = t(x_i) + \xi_i$ where $\xi_i$ is assumed to be independent Gaussian distributed noise with zero mean.
The posterior distribution is given by
\begin{equation}
p(\mathbf{y}\mid \mathbf{t}) = \left[ \prod_ip(y_i - t(x_i)) \right] \frac{1}{\sqrt{(2\pi)^m \det(K)}} \exp \left(-\frac{1}{2}\mathbf{t}^T K^{-1} \mathbf{t} \right)
\end{equation}
and after substituting $\mathbf{t} = K\mathbf{\alpha}$ and taking logarithms
\begin{equation}
\ln{p(\mathbf{\alpha} \mid \mathbf{y})} = - \frac{1}{2\sigma^2}\| \mathbf{y} - K \mathbf{\alpha} \|^2 -\frac{1}{2}\mathbf{\alpha}^T K \mathbf{\alpha} +c
\end{equation}
and maximizing $\ln{p(\mathbf{\alpha} \mid \mathbf{y})}$ for $\mathbf{\alpha}$ to obtain the maximum a posteriori approximation yields
\begin{equation}
\mathbf{\alpha} = (K + \sigma^2\mathbf{1})^{-1} \mathbf{y}
\end{equation}
Knowing $\mathbf{\alpha}$ allows for prediction of $y$ at a new location $x$ through $y = K(x,x_i){\mathbf{\alpha}}$. In similar fashion Gaussian processes can be used for classification.

\code{gausspr} is the function in \pkg{kernlab} implementing Gaussian processes for classification and regression.

\subsection{Ranking}

The success of Google has vividly demonstrated the value of a good ranking algorithm in real world problems. \pkg{kernlab} includes a ranking algorithm based on work published in \cite{kernlab:Zhou:2003}. This algorithm exploits the geometric structure of the data in contrast to the more naive approach which uses the Euclidean distances or inner products of the data. Since real world data are usually highly structured, this algorithm should perform better than a simpler approach based on a Euclidean distance measure.

First, a weighted network is defined on the data and an authoritative score is assigned to every point. The query points act as source nodes that continually pump their scores to the remaining points via the weighted network, and the remaining points further spread the score to their neighbors. The spreading process is repeated until convergence and the points are ranked according to the scores they received.

Suppose we are given a set of data points $X = \{x_1, \dots, x_{s}, x_{s+1}, \dots, x_{m}\}$ in $\mathbf{R}^n$ where the first $s$ points are the query points and the rest are the points to be ranked. The algorithm works by connecting the two nearest points iteratively until a connected graph $G = (X, E)$ is obtained, where $E$ is the set of edges. The affinity matrix $K$ is defined, e.g., by $K_{ij} = \exp(-\sigma\|x_i - x_j \|^2)$ if there is an edge $e(i,j) \in E$, and $0$ for the remaining entries and the diagonal elements. The matrix is normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$, and
\begin{equation}
f(t+1) = \alpha Lf(t) + (1 - \alpha)y
\end{equation}
is iterated until convergence, where $\alpha$ is a parameter in $[0,1)$. The points are then ranked according to their final scores $f_{i}(t_f)$.

\pkg{kernlab} includes an \proglang{S4} method implementing the ranking algorithm. The algorithm can be used both with an edge-graph, where the structure of the data is taken into account, and without, which is equivalent to ranking the data by their distance in the projected space.

\begin{figure}
\centering
<<ranking,fig=TRUE>>=
data(spirals)
ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,]
ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE)
ranked[54, 2] <- max(ranked[-54, 2])
c<-1:86
op <- par(mfrow = c(1, 2),pty="s")
plot(ran)
plot(ran, cex=c[ranked[,3]]/40)
@
\caption{The points on the left are ranked according to their similarity to the uppermost left point. Points with a higher rank appear bigger.
Instead of ranking the points by simple Euclidean distance, the structure of the data is recognized and all points on the upper structure are given a higher rank although they are further away in distance than points in the lower structure.}
\label{fig:Ranking}
\end{figure}

\subsection{Online learning with kernels}

The \code{onlearn} function in \pkg{kernlab} implements the online kernel algorithms for classification, novelty detection and regression described in \cite{kernlab:Kivinen:2004}. In batch learning, it is typically assumed that all the examples are immediately available and are drawn independently from some distribution $P$. One natural measure of quality for some $f$ in that case is the expected risk
\begin{equation}
R[f,P] := E_{(x,y)\sim P}[l(f(x),y)]
\end{equation}
Since usually $P$ is unknown, a standard approach is to instead minimize the empirical risk
\begin{equation}
R_{emp}[f] := \frac{1}{m}\sum_{t=1}^m l(f(x_t),y_t)
\end{equation}
Minimizing $R_{emp}[f]$ may lead to overfitting (complex functions that fit well on the training data but do not generalize to unseen data). One way to avoid this is to penalize complex functions by instead minimizing the regularized risk
\begin{equation}
R_{reg}[f,S] := R_{reg,\lambda}[f,S] := R_{emp}[f] + \frac{\lambda}{2}\|f\|_{H}^2
\end{equation}
where $\lambda > 0$ and $\|f\|_{H} = {\langle f,f \rangle}_{H}^{\frac{1}{2}}$ does indeed measure the complexity of $f$ in a sensible way. The constant $\lambda$ needs to be chosen appropriately for each problem. Since in online learning one is interested in dealing with one example at a time, the definition of an instantaneous regularized risk on a single example is needed
\begin{equation}
R_{inst}[f,x,y] := R_{inst,\lambda}[f,x,y] := R_{reg,\lambda}[f,((x,y))]
\end{equation}

The implemented algorithms are classical stochastic gradient descent algorithms performing gradient descent on the instantaneous risk. The general form of the update rule is:
\begin{equation}
f_{t+1} = f_t - \eta_t \partial_f R_{inst,\lambda}[f,x_t,y_t]|_{f=f_t}
\end{equation}
where $f_t \in H$, $\partial_f$ is shorthand for $\partial / \partial f$ (the gradient with respect to $f$), and $\eta_t > 0$ is the learning rate. Due to the learning taking place in a \textit{reproducing kernel Hilbert space} $H$ the kernel $k$ used has the property $\langle f,k(x,\cdotp)\rangle_H = f(x)$ and therefore
\begin{equation}
\partial_f l(f(x_t),y_t) = l'(f(x_t),y_t)k(x_t,\cdotp)
\end{equation}
where $l'(z,y) := \partial_z l(z,y)$. Since $\partial_f\|f\|_H^2 = 2f$ the update becomes
\begin{equation}
f_{t+1} := (1 - \eta_t\lambda)f_t - \eta_t l'(f_t(x_t),y_t)k(x_t,\cdotp)
\end{equation}

The \code{onlearn} function implements the online learning algorithm for regression, classification and novelty detection. The online nature of the algorithm requires a different approach to the use of the function. An object is used to store the state of the algorithm at each iteration $t$; this object is passed to the function as an argument and is returned at each iteration $t+1$, containing the model parameter state at this step. An empty object of class \code{onlearn} is initialized using the \code{inlearn} function.
<<onlearn>>=
## create toy data set
x <- rbind(matrix(rnorm(90),,2),matrix(rnorm(90)+3,,2))
y <- matrix(c(rep(1,45),rep(-1,45)),,1)

## initialize onlearn object
on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),type="classification")
ind <- sample(1:90,90)
## learn one data point at a time
for(i in ind)
on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1)
sign(predict(on,x))
@

\subsection{Spectral clustering}

Spectral clustering \citep{kernlab:Ng:2001} is a recently emerged, promising alternative to common clustering algorithms. In this method one uses the top eigenvectors of a matrix created by some similarity measure to cluster the data. Similarly to the ranking algorithm, an affinity matrix is created from the data as
\begin{equation}
K_{ij}=\exp(-\sigma\|x_i - x_j \|^2)
\end{equation}
and normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$. Then the top $k$ eigenvectors (where $k$ is the number of clusters to be found) of the affinity matrix are used to form an $n \times k$ matrix $Y$ where each column is normalized again to unit length. Treating each row of this matrix as a data point, \code{kmeans} is finally used to cluster the points.

\pkg{kernlab} includes an \proglang{S4} method called \code{specc} implementing this algorithm which can be used through a formula interface or a matrix interface. The \proglang{S4} object returned by the method extends the class ``vector'' and contains the assigned cluster for each point along with information on the centers, size, and within-cluster sum of squares for each cluster. In case a Gaussian RBF kernel is being used, a model selection process can be used to determine the optimal value of the $\sigma$ hyper-parameter. For a good value of $\sigma$ the values of $Y$ tend to cluster tightly and it turns out that the within-cluster sum of squares is a good indicator for the ``quality'' of the sigma parameter found. We then iterate through the sigma values to find an optimal value for $\sigma$.

\begin{figure}
\centering
<<fig=TRUE>>=
data(spirals)
sc <- specc(spirals, centers=2)
plot(spirals, pch=(23 - 2*sc))
@
\caption{Clustering the two spirals data set with \code{specc}}
\label{fig:Spectral Clustering}
\end{figure}

\subsection{Kernel principal components analysis}

Principal component analysis (PCA) is a powerful technique for extracting structure from possibly high-dimensional data sets. PCA is an orthogonal transformation of the coordinate system in which we describe the data. The new coordinates by which we represent the data are called principal components. Kernel PCA \citep{kernlab:Schoelkopf:1998} performs a nonlinear transformation of the coordinate system by finding principal components which are nonlinearly related to the input variables. Given a set of centered observations $x_k$, $k=1,\dots,M$, $x_k \in \mathbf{R}^N$, PCA diagonalizes the covariance matrix $C = \frac{1}{M}\sum_{j=1}^Mx_jx_{j}^T$ by solving the eigenvalue problem $\lambda\mathbf{v}=C\mathbf{v}$. The same computation can be done in a dot product space $F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$. Assuming that we deal with centered data and use the covariance matrix in $F$,
\begin{equation}
\hat{C}=\frac{1}{M}\sum_{j=1}^M \Phi(x_j)\Phi(x_j)^T
\end{equation}
the kernel principal components are then computed by taking the eigenvectors of the centered kernel matrix $K_{ij} = \langle \Phi(x_i),\Phi(x_j) \rangle$.
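Spelled out with the kernel utility functions described earlier, this computation amounts to the following small sketch (an illustration only; it assumes a data matrix \code{x} with observations as rows and an RBF kernel):

\begin{verbatim}
## kernel PCA "by hand": compute and center the kernel matrix,
## then take its eigen-decomposition
K  <- kernelMatrix(rbfdot(sigma = 0.1), x)
m  <- nrow(K)
H  <- diag(m) - matrix(1/m, m, m)   ## centering matrix
Kc <- H %*% K %*% H                 ## centered kernel matrix
pc <- eigen(Kc, symmetric = TRUE)   ## eigenvectors of Kc
\end{verbatim}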
\code{kpca}, the function implementing KPCA in \pkg{kernlab}, can be used both with a formula and a matrix interface, and returns an \proglang{S4} object of class \code{kpca} containing the principal components, the corresponding eigenvalues, and the projection of the training data on the new coordinate system. Furthermore, the \code{predict} function can be used to embed new data points into the new coordinate system.

\begin{figure}
\centering
<<kpca,fig=TRUE>>=
data(spam)
train <- sample(1:dim(spam)[1],400)
kpc <- kpca(~.,data=spam[train,-58],kernel="rbfdot",kpar=list(sigma=0.001),features=2)
kpcv <- pcv(kpc)
plot(rotated(kpc),col=as.integer(spam[train,58]),xlab="1st Principal Component",ylab="2nd Principal Component")
@
\caption{Projection of the spam data on two kernel principal components using an RBF kernel}
\label{fig:KPCA}
\end{figure}

\subsection{Kernel feature analysis}

Whilst KPCA leads to very good results, there are nevertheless some issues to be addressed. First, there is the computational complexity of the standard version of KPCA: the algorithm scales as $O(m^3)$. Secondly, the resulting feature extractors are given as a dense expansion in terms of the training patterns. Sparse solutions are often achieved in supervised learning settings by using an $l_1$ penalty on the expansion coefficients. An algorithm can be derived using the same approach in feature extraction, requiring only $n$ basis functions to compute the first $n$ features. Kernel feature analysis \citep{kernlab:Olvi:2000} is computationally simple and scales approximately one order of magnitude better on large data sets than standard KPCA. Choosing $\Omega [f] = \sum_{i=1}^m |\alpha_i |$ this yields
\begin{equation}
F_{LP} = \{ \mathbf{w} \vert \mathbf{w} = \sum_{i=1}^m \alpha_i \Phi(x_i) \mbox{~with~} \sum_{i=1}^m |\alpha_i | \leq 1 \}
\end{equation}
This setting leads to the first ``principal vector'' in the $l_1$ context
\begin{equation}
\mathbf{\nu}^1 = \mathrm{argmax}_{\mathbf{\nu} \in F_{LP}} \frac{1}{m} \sum_{i=1}^m \langle \mathbf{\nu},\mathbf{\Phi}(x_i) - \frac{1}{m}\sum_{j=1}^m\mathbf{\Phi}(x_j) \rangle^2
\end{equation}
Subsequent ``principal vectors'' can be defined by enforcing optimality with respect to the remaining orthogonal subspaces. Due to the $l_1$ constraint the solution has the favorable property of being sparse in terms of the coefficients $\alpha_i$.

The function \code{kfa} in \pkg{kernlab} implements Kernel Feature Analysis by using a projection pursuit technique on a sample of the data. Results are then returned in an \proglang{S4} object.

\begin{figure}
\centering
<<kfa,fig=TRUE>>=
data(promotergene)
f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot",kpar=list(sigma=0.013))
plot(predict(f,promotergene),col=as.numeric(promotergene[,1]),xlab="1st Feature",ylab="2nd Feature")
@
\caption{Projection of the promotergene data on two features using an RBF kernel}
\label{fig:KFA}
\end{figure}

\subsection{Kernel canonical correlation analysis}

Canonical correlation analysis (CCA) is concerned with describing the linear relations between variables. If we have two data sets $x_1$ and $x_2$, then the classical CCA attempts to find linear combinations of the variables which give the maximum correlation between the combinations. I.e., if
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}\mathbf{x_1} = \sum_j w_{1j} x_{1j} \\
&& y_2 = \mathbf{w_2}\mathbf{x_2} = \sum_j w_{2j} x_{2j}
\end{eqnarray*}
one wishes to find those values of $\mathbf{w_1}$ and $\mathbf{w_2}$ which maximize the correlation between $y_1$ and $y_2$.
Similar to the KPCA algorithm, CCA can be extended and used in a dot product space~$F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$, as
\begin{eqnarray*}
&& y_1 = \mathbf{w_1}\mathbf{\Phi(x_1)} = \sum_j w_{1j} \Phi(x_{1j}) \\
&& y_2 = \mathbf{w_2}\mathbf{\Phi(x_2)} = \sum_j w_{2j} \Phi(x_{2j})
\end{eqnarray*}

Following \cite{kernlab:kuss:2003}, the \pkg{kernlab} implementation of KCCA projects the data vectors on a new coordinate system using KPCA and uses linear CCA to retrieve the correlation coefficients. The \code{kcca} method in \pkg{kernlab} returns an \proglang{S4} object containing the correlation coefficients for each data set and the corresponding correlation, along with the kernel used.

\subsection{Interior point code quadratic optimizer}

In many kernel based algorithms, learning implies the minimization of some risk function. Typically we have to deal with quadratic or general convex problems for support vector machines of the type
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & f(x) \\
\mbox{subject to~} & c_i(x) \leq 0 \mbox{~for all~} i \in [n].
\end{array}
\end{equation}
$f$ and $c_i$ are convex functions and $n \in \mathbf{N}$. \pkg{kernlab} provides the \proglang{S4} method \code{ipop} implementing an optimizer of the interior point family \citep{kernlab:Vanderbei:1999} which solves the quadratic programming problem
\begin{equation}
\begin{array}{ll}
\mathrm{minimize} & c^\top x+\frac{1}{2}x^\top H x \\
\mbox{subject to~} & b \leq Ax \leq b + r\\
& l \leq x \leq u \\
\end{array}
\end{equation}
This optimizer can be used in regression, classification, and novelty detection in SVMs.

\subsection{Incomplete Cholesky decomposition}

When dealing with kernel based algorithms, calculating a full kernel matrix should be avoided since it is already an $O(N^2)$ operation. Fortunately, the fact that kernel matrices are positive semidefinite is a strong constraint and good approximations can be found with small computational cost. The Cholesky decomposition factorizes a positive semidefinite $N \times N$ matrix $K$ as $K=ZZ^T$, where $Z$ is a lower triangular $N \times N$ matrix. Exploiting the fact that kernel matrices are usually of low rank, an \emph{incomplete Cholesky decomposition} \citep{kernlab:Wright:1999} finds a matrix $\tilde{Z}$ of size $N \times M$ where $M\ll N$ such that the norm of $K-\tilde{Z}\tilde{Z}^T$ is smaller than a given tolerance $\theta$. The main difference between the incomplete Cholesky decomposition and the standard one is that pivots which are below a certain threshold are simply skipped. If $L$ is the number of skipped pivots, we obtain a $\tilde{Z}$ with only $M = N - L$ columns. The algorithm works by picking a column from $K$ to be added by maximizing a lower bound on the reduction of the error of the approximation. \pkg{kernlab} has an implementation of an incomplete Cholesky factorization called \code{inc.chol} which computes the decomposed matrix $\tilde{Z}$ from the original data for any given kernel without the need to compute a full kernel matrix beforehand. This has the advantage that no full kernel matrix has to be stored in memory.

\section{Conclusions}

In this paper we described \pkg{kernlab}, a flexible and extensible kernel methods package for \proglang{R} with existing modern kernel algorithms along with tools for constructing new kernel based algorithms.
It provides a unified framework for using and creating kernel-based algorithms in \proglang{R} while using all of \proglang{R}'s modern facilities, like \proglang{S4} classes and namespaces. Our aim for the future is to extend the package and add more kernel-based methods as well as kernel relevant tools. Sources and binaries for the latest version of \pkg{kernlab} are available at CRAN\footnote{\url{http://CRAN.R-project.org}} under the GNU Public License. A shorter version of this introduction to the \proglang{R} package \pkg{kernlab} is published as \cite{kernlab:Karatzoglou+Smola+Hornik:2004} in the \emph{Journal of Statistical Software}. \bibliography{jss} \end{document} kernlab/MD50000644000176000001440000001547012651724611012244 0ustar ripleyusers29f9888009b3ff4e1a2f13eda6e8da18 *DESCRIPTION 4bf5b70ad948b31056e6fc5102ddd995 *NAMESPACE 7db9a58cb6e5aeae749727781fe388f5 *R/aobjects.R 0750c9216dfd490ac36814b8b1ae24f2 *R/couplers.R f8e0ac1a792745090fa9a8da65847804 *R/csi.R 0b2e246b1dd17906f717c8d7be919b98 *R/gausspr.R ab289bc31386f29fa9b2bc9a667504f4 *R/inchol.R bfa34b64d293a380c5c4d045105d4496 *R/ipop.R 5f574afe5df7904fb80bb214f01fcc6c *R/kcca.R 67aed700531a0ce066bb9300e7f0169c *R/kernelmatrix.R 10804a9fc1281e6af5ccfe98fcb786c2 *R/kernels.R a9caef19bea47f18788e88c931dc59af *R/kfa.R faa1891fddacccb076b960679b8fcf1a *R/kha.R fe614c20ff89de892f69a7fe9d5e3034 *R/kkmeans.R 78cd6c834753a4f6c9f2ce570df37aaa *R/kmmd.R b40405bcd225f13b79eb50a697453ea6 *R/kpca.R 3112bf34f131e0ae6ec4cd5703165d00 *R/kqr.R 1ba7decd1986133691d09ac2c38ffc55 *R/ksvm.R 74b56a01010520686b7a3056d6543101 *R/lssvm.R 9a6305a7f6f48b3d5b9897aee24c7a88 *R/onlearn.R fca7e1cdba31a9fe3f89e74c2d5ced3e *R/ranking.R 21f675dd26b29b6c41f986b9f21991c9 *R/rvm.R 924ca5f4426387e9bf558900e0f45b49 *R/sigest.R 3e247cd92d09d7ebcc50e4535b1664bc *R/specc.R a10554ee3096c655c2c900fd09330b9e *build/vignette.rds a18216a60f6bca0caeb1e7f3bc1e41f4 *data/income.rda df34952c3745281389f9cad12628f730 *data/musk.rda 05d32f54c207cbe6ad311670369a9de5 *data/promotergene.rda 8c75e60ab50929294ee51e2c691257d6 *data/reuters.rda 6227d190de9433751c259621b3ad17c4 *data/spam.rda f1286488185c385585374789bacddfce *data/spirals.rda 323669a1754224f2c62e216753570081 *data/ticdata.rda eb46ae31648115dd5dead970966f9cbf *inst/CITATION 68fe0d0d842fbc1b217f45934a8edf7a *inst/COPYRIGHTS 0d1b1a09dbb52e3b0e58676170c3ce3d *inst/doc/kernlab.R c4c223d07206b59e2d43a585d07164b1 *inst/doc/kernlab.Rnw d19f6c0cb3885e4fd5c2bcd482f1b42a *inst/doc/kernlab.pdf ca7923a78d389602d891a3cf6a5193d9 *man/as.kernelMatrix.Rd c0c282d5b6dd984608a1d5b9c92fe478 *man/couple.Rd e36dc0b16ba570c99ead7a48394dc66d *man/csi-class.Rd f87d54c4c4bf47f760cc6a779c7e525d *man/csi.Rd 704bfeedf89329461a20e4cb51a237f0 *man/dots.Rd 285c27b5d9a389dfd7e2f8e392de215c *man/gausspr-class.Rd fd9fe426e55ff79ffa5aabe84abd229c *man/gausspr.Rd b61d371ba2f8d8b137ec3c32a115c3ab *man/inchol-class.Rd f91fdd7d2e3c9aec28d31575d2ba0a6e *man/inchol.Rd 452553ee15225244a50b73aa08cca861 *man/income.Rd 9599ae27d6ebe41302c6236aa381b313 *man/inlearn.Rd bbcfe86bcb66e4b222b9ba13869fa2b0 *man/ipop-class.Rd c2e71c62027e5534eaf1f4c2dbcf0a6a *man/ipop.Rd 62c2b5318bb86222cb8d9cd361998d36 *man/kcca-class.Rd fb5a84011ee5c0fd03287b957379aab7 *man/kcca.Rd ef26a19723ffb7f6eb6dd3539905d6c4 *man/kernel-class.Rd 7357130456764a2b77cbf39d05d8dc98 *man/kernelMatrix.Rd 7a1e2bc5f883b6e7339bd717f0569eaf *man/kfa-class.Rd 22c7587c02310941aa5c484a3551ff70 *man/kfa.Rd 54afaeff97629d4a1353cdd98b5dde37 *man/kha-class.Rd 630bbe5b92f49a6eb501ddd0776fae3b *man/kha.Rd 
730086452f568aacc5bea56bb514e2ff *man/kkmeans.Rd c3458139340043b2d63e9a642386582e *man/kmmd-class.Rd 6246385dba8697c83028cbece148c203 *man/kmmd.Rd b39a018897562f1cf907c7d0920186ce *man/kpca-class.Rd ba3a5bde31ea982871c7690edc588b23 *man/kpca.Rd 5a3b2344811fded04018d0b56d9bca23 *man/kqr-class.Rd 1ef59facd1ed13402b663beb16f6593a *man/kqr.Rd 3bdce4dc10887da4bacdac6830e66db8 *man/ksvm-class.Rd f98da25e651db60717100721a7a6f7cc *man/ksvm.Rd dd6a605572b276158f753cf3e3dce63e *man/lssvm-class.Rd bab982b9b6cdbdfa1d9c50cacd72408d *man/lssvm.Rd 95f670451348298d1c5daa00498f9f65 *man/musk.Rd 6d1c014b9f6bb8b59d032fd444bf5a04 *man/onlearn-class.Rd e14a6bd165c9595d1b014bd983d810b5 *man/onlearn.Rd 75f80214439e10c8d1b0104f5bcb44ba *man/plot.Rd f67747838e34ee3400ad4ffe299eba71 *man/prc-class.Rd fb4f0a2a30d3ec62e66a125f64d7f018 *man/predict.gausspr.Rd 69e21e71600ccf8a8df4a1adb84213fe *man/predict.kqr.Rd a92aae4f4aa90adbfc6d9f698426e55c *man/predict.ksvm.Rd 17510c748e43b26899603fff435572fb *man/promotergene.Rd f3a2c50017ea501680b53c9e221bf6b5 *man/ranking-class.Rd 0a26fab5b4dc78f254b408e396aba191 *man/ranking.Rd 8bee0b6c367f1c5f749b296ff48dcc23 *man/reuters.Rd 2b1f6b6093d9d0a915995b59caf1561d *man/rvm-class.Rd f406be43ad5c7a6d4e2b90c46e42d2a6 *man/rvm.Rd 86c5fd418857bae9a5c736e8c57a5c5e *man/sigest.Rd 38c1b0a597898ffd36fd635af5df2d32 *man/spam.Rd b176c7c0f1edb61818e9ecfde276f349 *man/specc-class.Rd 7c1efb159e6b590600d84151e848aca6 *man/specc.Rd c707c7af1229bdfca87272866bb3199a *man/spirals.Rd 149b3590c24913c3718c9f1d6c265b9a *man/stringdot.Rd 5a3d623ac56f129716429ba87481eaeb *man/ticdata.Rd fa4feb7dd29492877886e4d86d0cb8f4 *man/vm-class.Rd 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars.win 3b77d80677bb88fb39cab4a7d2351056 *src/brweight.cpp 048d635dbf0db99a0b707bf0a9c06984 *src/brweight.h 50cd06527f816675b128669d222bee56 *src/ctable.cpp cb1e056dfcc52d5319e71981f9c90611 *src/ctable.h 342cbb0568a2fa8f27b1f0c42542737e *src/cweight.cpp 0ede046d861731d10f965e2ff8f50e4e *src/cweight.h 5c02223129df9d548c614acd0593645d *src/datatype.h f085fe8cca3cb634567600216eb4aad2 *src/dbreakpt.c b08bdfd188f69c9ab839895556789d64 *src/dcauchy.c 455ccdeed46ccda0958453306fe9a951 *src/dgpnrm.c c9ae627ea63dec6d72867c2026121648 *src/dgpstep.c 821081c5c42e2a20237abcced03a3a6f *src/dprecond.c 165209b9e9410785dcee940d35d53c05 *src/dprsrch.c 33b02078ecd469dfda0aeb1e5ba98cb2 *src/dspcg.c e13d4f68dd0e3b613f40066c47387233 *src/dtron.c f3c6c30f24ade3e5aa146d0f0a6b11f5 *src/dtrpcg.c 616fbd8165eddace388ffc7ffd90c753 *src/dtrqsol.c beb2c099ff3dd87e3474a30a49a8437e *src/errorcode.h 403e60e8ef01e6fcd8820a78287b8c4e *src/esa.cpp ab96f4b2f43cc0306c88547ab6abe1ad *src/esa.h 5a7166f36e34cc037b9c2006f8bc00c9 *src/expdecayweight.cpp 7f04e95fcd76ee21dcea4d7138d96326 *src/expdecayweight.h d16372bf79ce22a92dfcf3c0d0b769e7 *src/ilcpfactory.h f103b80f529451ab71a425a31ed1eabf *src/inductionsort.cpp fd4a5ad4b79ca119885410bb45c7d12f *src/inductionsort.h 76adf49038c3585cf216cd033a9b4183 *src/introsort.h 0073f847ac8606d19e03cb0eeb27e0a2 *src/isafactory.h 94245de3f9b29eee07fd1f7d8d8929cd *src/iweightfactory.h d2d7af10799002c2392f038e7d767c3f *src/kspectrumweight.cpp b5d07bb286e3767cda7a371c50d0122e *src/kspectrumweight.h 81884b6e3b3e02f26e75974febbdaa2d *src/lcp.cpp 6de81523902a1d4dce2b38ce3d57ce98 *src/lcp.h f47f3118ea197009f6f0e12edeb5fc17 *src/misc.c 6c508e5aad78e137392391c0d5e2fa4d *src/msufsort.cpp a7c14e599149c35dd243a8410cb2f9c0 *src/msufsort.h 36b8004ade5fe1c5c2edb01cf74ce5cd *src/solvebqp.c 
823808c44b18f59c9eef3ad4f1f41930 *src/stack.h 079a2f29ea98ab6f5ca4e814bb2917ba *src/stringk.c a826262fdc41d078229ec858f1fcab1a *src/stringkernel.cpp 393bf9882322163e203b2a03789e7b05 *src/stringkernel.h ae74f6ea199b5d5b9b4b045afac5fa40 *src/svm.cpp 670301bb88ff2b0f28ece190a96635c7 *src/svm.h 5f5910aab31dc2ebacb4b15caba8e873 *src/wkasailcp.cpp fd6807b3526c7d5442f66a2660bd9e4c *src/wkasailcp.h f48a5df5ecbf1ac1831e5582798eb57d *src/wmsufsort.cpp 2694af88ced7e4391e92120d0c90587c *src/wmsufsort.h a324922cf3b84ae82f364be31135168f *vignettes/A.cls 0bb2f41f77a58dd866a86cd0b164b3c6 *vignettes/jss.bib c4c223d07206b59e2d43a585d07164b1 *vignettes/kernlab.Rnw kernlab/build/0000755000176000001440000000000012560430717013024 5ustar ripleyuserskernlab/build/vignette.rds0000644000176000001440000000045412560430717015366 0ustar ripleyusersuPN0t$T!¥?TR!Um\dZI8DrCaihwv^BBK'NK8 i*$h (2NwuԂP-mL sC w/8XVȓeMޫ-{z_7U%+R q໷ Ŵ"PgP <a\4B9VҼ]s_nrP> _L?vXN?q1';: mD;VNN~ekernlab/DESCRIPTION0000644000176000001440000000174212651724610013436 0ustar ripleyusersPackage: kernlab Version: 0.9-23 Title: Kernel-Based Machine Learning Lab Authors@R: c(person("Alexandros", "Karatzoglou", role = c("aut", "cre"), email = "alexis@ci.tuwien.ac.at"), person("Alex", "Smola", role = "aut"), person("Kurt", "Hornik", role = "aut")) Description: Kernel-based machine learning methods for classification, regression, clustering, novelty detection, quantile regression and dimensionality reduction. Among other methods 'kernlab' includes Support Vector Machines, Spectral Clustering, Kernel PCA, Gaussian Processes and a QP solver. Depends: R (>= 2.10) Imports: methods, stats, grDevices, graphics LazyLoad: Yes License: GPL-2 NeedsCompilation: yes Packaged: 2016-01-26 16:42:01 UTC; ripley Author: Alexandros Karatzoglou [aut, cre], Alex Smola [aut], Kurt Hornik [aut] Maintainer: Alexandros Karatzoglou Repository: CRAN Date/Publication: 2016-01-26 18:14:48 kernlab/man/0000755000176000001440000000000012560414652012500 5ustar ripleyuserskernlab/man/rvm-class.Rd0000644000176000001440000001100211304023134014652 0ustar ripleyusers\name{rvm-class} \docType{class} \alias{rvm-class} \alias{RVindex} \alias{mlike} \alias{nvar} \alias{RVindex,rvm-method} \alias{alpha,rvm-method} \alias{cross,rvm-method} \alias{error,rvm-method} \alias{kcall,rvm-method} \alias{kernelf,rvm-method} \alias{kpar,rvm-method} \alias{lev,rvm-method} \alias{mlike,rvm-method} \alias{nvar,rvm-method} \alias{type,rvm-method} \alias{xmatrix,rvm-method} \alias{ymatrix,rvm-method} \title{Class "rvm"} \description{Relevance Vector Machine Class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("rvm", ...)}. or by calling the \code{rvm} function. 
}
\section{Slots}{
  \describe{
    \item{\code{tol}:}{Object of class \code{"numeric"} contains the tolerance of the termination criteria used.}
    \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used}
    \item{\code{kpar}:}{Object of class \code{"list"} contains the hyperparameter used}
    \item{\code{kcall}:}{Object of class \code{"call"} contains the function call}
    \item{\code{type}:}{Object of class \code{"character"} contains the type of problem}
    \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula interface)}
    \item{\code{xmatrix}:}{Object of class \code{"matrix"} contains the data matrix used during computation}
    \item{\code{ymatrix}:}{Object of class \code{"output"} contains the response matrix}
    \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values (predict on training set).}
    \item{\code{lev}:}{Object of class \code{"vector"} contains the levels of the response (in classification)}
    \item{\code{nclass}:}{Object of class \code{"numeric"} contains the number of classes (in classification)}
    \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector}
    \item{\code{coef}:}{Object of class \code{"ANY"} containing the resulting model parameters}
    \item{\code{nvar}:}{Object of class \code{"numeric"} containing the calculated variance (in case of regression)}
    \item{\code{mlike}:}{Object of class \code{"numeric"} containing the computed maximum likelihood}
    \item{\code{RVindex}:}{Object of class \code{"vector"} containing the indexes of the resulting relevance vectors}
    \item{\code{nRV}:}{Object of class \code{"numeric"} containing the number of relevance vectors}
    \item{\code{cross}:}{Object of class \code{"numeric"} containing the resulting cross validation error}
    \item{\code{error}:}{Object of class \code{"numeric"} containing the training error}
    \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA}
  }
}
\section{Methods}{
  \describe{
    \item{RVindex}{\code{signature(object = "rvm")}: returns the index of the relevance vectors}
    \item{alpha}{\code{signature(object = "rvm")}: returns the resulting alpha vector}
    \item{cross}{\code{signature(object = "rvm")}: returns the resulting cross validation error}
    \item{error}{\code{signature(object = "rvm")}: returns the training error}
    \item{fitted}{\code{signature(object = "vm")}: returns the fitted values}
    \item{kcall}{\code{signature(object = "rvm")}: returns the function call}
    \item{kernelf}{\code{signature(object = "rvm")}: returns the kernel function used}
    \item{kpar}{\code{signature(object = "rvm")}: returns the parameters of the kernel function}
    \item{lev}{\code{signature(object = "rvm")}: returns the levels of the response (in classification)}
    \item{mlike}{\code{signature(object = "rvm")}: returns the estimated maximum likelihood}
    \item{nvar}{\code{signature(object = "rvm")}: returns the calculated variance (in regression)}
    \item{type}{\code{signature(object = "rvm")}: returns the type of problem}
    \item{xmatrix}{\code{signature(object = "rvm")}: returns the data matrix used during computation}
    \item{ymatrix}{\code{signature(object = "rvm")}: returns the response used}
  }
}
\author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{
  \code{\link{rvm}},
  \code{\link{ksvm-class}}
}
\examples{
# create data
x <- seq(-20,20,0.1)
y <- sin(x)/x + rnorm(401,sd=0.05)

# train relevance vector machine
foo <- rvm(x, y)
foo

alpha(foo)
RVindex(foo) fitted(foo) kernelf(foo) nvar(foo) ## show slots slotNames(foo) } \keyword{classes} kernlab/man/onlearn.Rd0000644000176000001440000000467612560414652014436 0ustar ripleyusers\name{onlearn} \alias{onlearn} \alias{onlearn,onlearn-method} \title{Kernel Online Learning algorithms} \description{ Online Kernel-based Learning algorithms for classification, novelty detection, and regression. } \usage{ \S4method{onlearn}{onlearn}(obj, x, y = NULL, nu = 0.2, lambda = 1e-04) } \arguments{ \item{obj}{\code{obj} an object of class \code{onlearn} created by the initialization function \code{inlearn} containing the kernel to be used during learning and the parameters of the learned model} \item{x}{vector or matrix containing the data. Factors have to be numerically coded. If \code{x} is a matrix the code is run internally one sample at a time.} \item{y}{the class label in case of classification. Only binary classification is supported and class labels have to be -1 or +1. } \item{nu}{the parameter, similar to the \code{nu} parameter in SVM, that bounds the training error.} \item{lambda}{the learning rate} } \details{ The online algorithms are based on a simple stochastic gradient descent method in feature space. The state of the algorithm is stored in an object of class \code{onlearn} and has to be passed to the function at each iteration. } \value{ The function returns an \code{S4} object of class \code{onlearn} containing the model parameters and the last fitted value which can be retrieved by the accessor method \code{fit}. The value returned in the classification and novelty detection problem is the decision function value phi. The accessor method \code{alpha} returns the model parameters. } \references{ Kivinen J. Smola A.J. Williamson R.C. \cr \emph{Online Learning with Kernels}\cr IEEE Transactions on Signal Processing vol. 52, Issue 8, 2004\cr \url{http://users.cecs.anu.edu.au/~williams/papers/P172.pdf}} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{inlearn}}} \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2), type="classification") ind <- sample(1:100,100) ## learn one data point at a time for(i in ind) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) ## or learn all the data on <- onlearn(on,x[ind,],y[ind],nu=0.03,lambda=0.1) sign(predict(on,x))
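## a minimal check (sketch, assuming the toy data above): fraction of
## training points the online model classifies correctly
mean(sign(predict(on, x)) == y)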
} \keyword{classif} \keyword{neural} \keyword{regression} \keyword{ts} kernlab/man/kqr.Rd0000644000176000001440000002055212117365752013574 0ustar ripleyusers\name{kqr} \alias{kqr} \alias{kqr,formula-method} \alias{kqr,vector-method} \alias{kqr,matrix-method} \alias{kqr,list-method} \alias{kqr,kernelMatrix-method} \alias{coef,kqr-method} \alias{show,kqr-method} \title{Kernel Quantile Regression.} \description{The Kernel Quantile Regression algorithm \code{kqr} performs non-parametric Quantile Regression.} \usage{ \S4method{kqr}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{kqr}{vector}(x,...) \S4method{kqr}{matrix}(x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot", kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6, fit = TRUE, cross = 0, na.action = na.omit) \S4method{kqr}{kernelMatrix}(x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0) \S4method{kqr}{list}(x, y, tau = 0.5, C = 0.1, kernel = "stringdot", kpar = list(length=4, C=0.5), fit = TRUE, cross = 0) } \arguments{ \item{x}{the data or a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). Note, that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{kqr} is called from.} \item{y}{a numeric vector or a column matrix containing the response.} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions. (default: TRUE)} \item{tau}{the quantile to be estimated; this is generally a number strictly between 0 and 1. For 0.5 the median is calculated. (default: 0.5)} \item{C}{the cost regularization parameter. This parameter controls the smoothness of the fitted function; essentially, higher values for C lead to less smooth functions. (default: 0.1)} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. \code{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well.
In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in 'sigest' to calculate a good 'sigma' value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic"). } \item{reduced}{use an incomplete Cholesky decomposition to calculate a decomposed form \eqn{Z} of the kernel matrix \eqn{K} (where \eqn{K = ZZ'}) and perform the calculations with \eqn{Z}. This might be useful when using \code{kqr} with large datasets since normally an \eqn{n \times n} kernel matrix would be computed. Setting \code{reduced} to \code{TRUE} makes use of \code{csi} to compute a decomposed form instead and thus only an \eqn{n \times m} matrix, where \eqn{m < n} and \eqn{n} is the sample size, is stored in memory (default: FALSE)} \item{rank}{the rank m of the decomposed matrix calculated when using an incomplete Cholesky decomposition. This parameter is only taken into account when \code{reduced} is \code{TRUE} (default: dim(x)[1]/6)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the pinball loss for the given quantile is computed on the held-out data.} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{additional parameters.} } \details{In quantile regression a function is fitted to the data so that it satisfies the property that a portion \eqn{tau} of the data \eqn{y|x} is below the estimate. While the error bars of many regression problems can be viewed as such estimates, quantile regression estimates this quantity directly. Kernel quantile regression is similar to nu-Support Vector Regression in that it minimizes a regularized loss function in RKHS. The difference between nu-SVR and kernel quantile regression is in the type of loss function used, which in the case of quantile regression is the pinball loss (see the reference for details). Minimizing the regularized loss boils down to a quadratic problem which is solved using an interior point QP solver \code{ipop} implemented in \code{kernlab}. } \value{ An S4 object of class \code{kqr} containing the fitted model along with information. Accessor functions can be used to access the slots of the object which include : \item{alpha}{The resulting model parameters which can be also accessed by \code{coef}.} \item{kernelf}{the kernel function used.} \item{error}{Training error (if fit == TRUE)} see \code{kqr-class} for more details. }
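\note{The pinball loss mentioned above can be written directly in R; the following is a minimal sketch (the names \code{pinball}, \code{u} and \code{tau} are illustrative and not part of the package API; \code{u} stands for the residuals \eqn{y - f(x)}):
\preformatted{
pinball <- function(u, tau) ifelse(u >= 0, tau * u, (tau - 1) * u)
}
Averaging \code{pinball(y - ytest, tau)} over the data gives the empirical pinball loss of a fit.}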
\references{Ichiro Takeuchi, Quoc V. Le, Timothy D. Sears, Alexander J. Smola\cr \emph{Nonparametric Quantile Estimation}\cr Journal of Machine Learning Research 7, 2006, 1231-1264 \cr \url{http://www.jmlr.org/papers/volume7/takeuchi06a/takeuchi06a.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{predict.kqr}}, \code{\link{kqr-class}}, \code{\link{ipop}}, \code{\link{rvm}}, \code{\link{ksvm}}} \examples{ # create data x <- sort(runif(300)) y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x))) # first calculate the median qrm <- kqr(x, y, tau = 0.5, C=0.15) # predict and plot plot(x, y) ytest <- predict(qrm, x) lines(x, ytest, col="blue") # calculate 0.9 quantile qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar= list(sigma=10), C=0.15) ytest <- predict(qrm, x) lines(x, ytest, col="red") # calculate 0.1 quantile qrm <- kqr(x, y, tau = 0.1, C=0.15) ytest <- predict(qrm, x) lines(x, ytest, col="green") # print first 10 model coefficients coef(qrm)[1:10] } \keyword{regression} \keyword{nonlinear} \keyword{methods} kernlab/man/couple.Rd0000644000176000001440000000363211304023134014246 0ustar ripleyusers\name{couple} \alias{couple} \title{Probabilities Coupling function} \description{ \code{couple} is used to link class-probability estimates produced by pairwise coupling in multi-class classification problems. } \usage{ couple(probin, coupler = "minpair") } \arguments{ \item{probin}{ The pairwise coupled class-probability estimates} \item{coupler}{The type of coupler to use. Currently \code{minpair}, \code{pkpd} and \code{vote} are supported (see reference for more details). If \code{vote} is selected the returned value is a primitive estimate based on the given votes.} } \details{ As binary classification problems are much easier to solve, many techniques exist to decompose multi-class classification problems into many binary classification problems (voting, error codes, etc.). Pairwise coupling (one against one) constructs a rule for discriminating between every pair of classes and then selecting the class with the most winning two-class decisions. By using Platt's probability outputs for SVM one can get a class probability for each of the \eqn{k(k-1)/2} models created in the pairwise classification. The couple method implements various techniques to combine these probabilities. } \value{ A matrix with the resulting probability estimates. } \references{ Ting-Fan Wu, Chih-Jen Lin, Ruby C. Weng\cr \emph{Probability Estimates for Multi-class Classification by Pairwise Coupling}\cr Neural Information Processing Symposium 2003 \cr \url{http://books.nips.cc/papers/files/nips16/NIPS2003_0538.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{predict.ksvm}}, \code{\link{ksvm}}} \examples{ ## create artificial pairwise probabilities pairs <- matrix(c(0.82,0.12,0.76,0.1,0.9,0.05),2) couple(pairs) couple(pairs, coupler="pkpd") couple(pairs, coupler = "vote")
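## a quick sanity check (sketch): each row of the coupled estimates
## should sum to one
rowSums(couple(pairs))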
} \keyword{classif} kernlab/man/kpca.Rd0000644000176000001440000001207412560414652013711 0ustar ripleyusers\name{kpca} \alias{kpca} \alias{kpca,formula-method} \alias{kpca,matrix-method} \alias{kpca,kernelMatrix-method} \alias{kpca,list-method} \alias{predict,kpca-method} \title{Kernel Principal Components Analysis} \description{ Kernel Principal Components Analysis is a nonlinear form of principal component analysis.} \usage{ \S4method{kpca}{formula}(x, data = NULL, na.action, ...) \S4method{kpca}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, th = 1e-4, na.action = na.omit, ...) \S4method{kpca}{kernelMatrix}(x, features = 0, th = 1e-4, ...) \S4method{kpca}{list}(x, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), features = 0, th = 1e-4, na.action = na.omit, ...) } \arguments{ \item{x}{the data matrix indexed by row or a formula describing the model, or a kernel matrix of class \code{kernelMatrix}, or a list of character vectors} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. (default: 0, all)} \item{th}{the value of the eigenvalue under which principal components are ignored (only valid when features = 0). (default: 0.0001) } \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{Using kernel functions one can efficiently compute principal components in high-dimensional feature spaces, related to input space by some non-linear map.\cr The data can be passed to the \code{kpca} function in a \code{matrix} or a \code{data.frame}; in addition, \code{kpca} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used. } \value{ An S4 object containing the principal component vectors along with the corresponding eigenvalues. \item{pcv}{a matrix containing the principal component vectors (column wise)} \item{eig}{The corresponding eigenvalues} \item{rotated}{The original data projected (rotated) on the principal components} \item{xmatrix}{The original data matrix} all the slots of the object can be accessed by accessor functions.
} \note{The predict function can be used to embed new data in the new space} \references{ Schoelkopf B., A. Smola, K.-R. Mueller :\cr \emph{Nonlinear component analysis as a kernel eigenvalue problem}\cr Neural Computation 10, 1299-1319\cr \url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.1366} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kcca}}, \code{pca}} \examples{ # another example using the iris data data(iris) test <- sample(1:150,20) kpc <- kpca(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2) # print the principal component vectors pcv(kpc) # plot the data projection on the components plot(rotated(kpc),col=as.integer(iris[-test,5]), xlab="1st Principal Component",ylab="2nd Principal Component") # embed remaining points emb <- predict(kpc,iris[test,-5]) points(emb,col=as.integer(iris[test,5])) } \keyword{cluster} kernlab/man/spirals.Rd0000644000176000001440000000054311304023134014430 0ustar ripleyusers\name{spirals} \alias{spirals} \title{Spirals Dataset} \description{A toy data set representing two spirals with Gaussian noise. The data was created with the \code{mlbench.spirals} function in \code{mlbench}. } \usage{data(spirals)} \format{ A matrix with 300 observations and 2 variables. } \examples{ data(spirals) plot(spirals) } \keyword{datasets} kernlab/man/predict.gausspr.Rd0000644000176000001440000000416612117365151016110 0ustar ripleyusers\name{predict.gausspr} \alias{predict.gausspr} \alias{predict,gausspr-method} \title{predict method for Gaussian Processes object} \description{Prediction of test data using Gaussian Processes} \usage{ \S4method{predict}{gausspr}(object, newdata, type = "response", coupler = "minpair") } \arguments{ \item{object}{an S4 object of class \code{gausspr} created by the \code{gausspr} function} \item{newdata}{a data frame or matrix containing new data} \item{type}{one of \code{response}, \code{probabilities} indicating the type of output: predicted values or matrix of class probabilities} \item{coupler}{Coupling method used in the multiclass case, can be one of \code{minpair} or \code{pkpd} (see reference for more details).} } \value{ \item{response}{predicted classes (the classes with majority vote) or the response value in regression.} \item{probabilities}{matrix of class probabilities (one column for each class and one row for each input).} } \references{ \itemize{ \item C. K. I. Williams and D. Barber \cr Bayesian classification with Gaussian processes. \cr IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(12):1342-1351, 1998\cr \url{http://www.dai.ed.ac.uk/homes/ckiw/postscript/pami_final.ps.gz} \item T.F. Wu, C.J. Lin, R.C. Weng.
\cr \emph{Probability estimates for Multi-class Classification by Pairwise Coupling}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \keyword{classif} \examples{ ## example using the promotergene data set data(promotergene) ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a Gaussian process classifier gene <- gausspr(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma=0.015)) gene ## predict gene type probabilities on the test set genetype <- predict(gene,genetest,type="probabilities") genetype } kernlab/man/kqr-class.Rd0000644000176000001440000001051412117363316014666 0ustar ripleyusers\name{kqr-class} \docType{class} \alias{kqr-class} \alias{alpha,kqr-method} \alias{cross,kqr-method} \alias{error,kqr-method} \alias{kcall,kqr-method} \alias{kernelf,kqr-method} \alias{kpar,kqr-method} \alias{param,kqr-method} \alias{alphaindex,kqr-method} \alias{b,kqr-method} \alias{xmatrix,kqr-method} \alias{ymatrix,kqr-method} \alias{scaling,kqr-method} \title{Class "kqr"} \description{The Kernel Quantile Regression object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kqr", ...)}, or by calling the \code{kqr} function. } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{coef}:}{Object of class \code{"ANY"} containing the model parameters} \item{\code{param}:}{Object of class \code{"list"} contains the cost parameter C and tau parameter used } \item{\code{kcall}:}{Object of class \code{"list"} contains the used function call } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computed alpha values } \item{\code{b}:}{Object of class \code{"numeric"} containing the offset of the model.} \item{\code{scaling}}{Object of class \code{"ANY"} containing the scaling coefficients of the data (when case \code{scaled = TRUE} is used).} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed in NA } \item{\code{nclass}:}{Inherited from class \code{vm}, not used in kqr} \item{\code{lev}:}{Inherited from class \code{vm}, not used in kqr} \item{\code{type}:}{Inherited from class \code{vm}, not used in kqr} } } \section{Methods}{ \describe{ \item{coef}{\code{signature(object = "kqr")}: returns the coefficients (alpha) of the model} \item{alpha}{\code{signature(object = "kqr")}: returns the alpha vector (identical to \code{coef})} \item{b}{\code{signature(object = "kqr")}: returns the offset beta of the model.} \item{cross}{\code{signature(object = "kqr")}: returns the cross validation error } \item{error}{\code{signature(object = "kqr")}: returns the training
error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "kqr")}: returns the call performed} \item{kernelf}{\code{signature(object = "kqr")}: returns the kernel function used} \item{kpar}{\code{signature(object = "kqr")}: returns the kernel parameter used} \item{param}{\code{signature(object = "kqr")}: returns the cost regularization parameter C and tau used} \item{xmatrix}{\code{signature(object = "kqr")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "kqr")}: returns the response matrix used} \item{scaling}{\code{signature(object = "kqr")}: returns the scaling coefficients of the data (when \code{scaled = TRUE} is used)} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kqr}}, \code{\link{vm-class}}, \code{\link{ksvm-class}} } \examples{ # create data x <- sort(runif(300)) y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x))) # first calculate the median qrm <- kqr(x, y, tau = 0.5, C=0.15) # predict and plot plot(x, y) ytest <- predict(qrm, x) lines(x, ytest, col="blue") # calculate 0.9 quantile qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar = list(sigma = 10), C = 0.15) ytest <- predict(qrm, x) lines(x, ytest, col="red") # print model coefficients and other information coef(qrm) b(qrm) error(qrm) kernelf(qrm) } \keyword{classes} kernlab/man/kfa-class.Rd0000644000176000001440000000371511304023134014623 0ustar ripleyusers\name{kfa-class} \docType{class} \alias{kfa-class} \alias{alpha,kfa-method} \alias{alphaindex,kfa-method} \alias{kcall,kfa-method} \alias{kernelf,kfa-method} \alias{predict,kfa-method} \alias{xmatrix,kfa-method} \title{Class "kfa"} \description{The class of the object returned by the Kernel Feature Analysis \code{kfa} function} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kfa", ...)} or by calling the \code{kfa} method. The objects contain the features along with the alpha values. } \section{Slots}{ \describe{ \item{\code{alpha}:}{Object of class \code{"matrix"} containing the alpha values } \item{\code{alphaindex}:}{Object of class \code{"vector"} containing the indexes of the selected feature} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the selected features} \item{\code{kcall}:}{Object of class \code{"call"} containing the \code{kfa} function call} \item{\code{terms}:}{Object of class \code{"ANY"} containing the formula terms} } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "kfa")}: returns the alpha values } \item{alphaindex}{\code{signature(object = "kfa")}: returns the index of the selected features} \item{kcall}{\code{signature(object = "kfa")}: returns the function call } \item{kernelf}{\code{signature(object = "kfa")}: returns the kernel function used } \item{predict}{\code{signature(object = "kfa")}: used to embed more data points to the feature base} \item{xmatrix}{\code{signature(object = "kfa")}: returns the selected features. 
} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kfa}}, \code{\link{kpca-class}} } \examples{ data(promotergene) f <- kfa(~.,data=promotergene) } \keyword{classes} kernlab/man/kmmd.Rd0000644000176000001440000001223512560414652013722 0ustar ripleyusers\name{kmmd} \alias{kmmd} \alias{kmmd,matrix-method} \alias{kmmd,list-method} \alias{kmmd,kernelMatrix-method} \alias{show,kmmd-method} \alias{H0} \alias{Asymbound} \alias{Radbound} \alias{mmdstats} \alias{AsympH0} \title{Kernel Maximum Mean Discrepancy.} \description{The Kernel Maximum Mean Discrepancy \code{kmmd} performs a non-parametric distribution test.} \usage{ \S4method{kmmd}{matrix}(x, y, kernel="rbfdot",kpar="automatic", alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) \S4method{kmmd}{kernelMatrix}(x, y, Kxy, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...) \S4method{kmmd}{list}(x, y, kernel="stringdot", kpar = list(type = "spectrum", length = 4), alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) } \arguments{ \item{x}{data values, in a \code{matrix}, \code{list}, or \code{kernelMatrix}} \item{y}{data values, in a \code{matrix}, \code{list}, or \code{kernelMatrix}} \item{Kxy}{\code{kernelMatrix} between \eqn{x} and \eqn{y} values (only for the kernelMatrix interface)} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. \code{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in 'sigest' to calculate a good 'sigma' value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").
} \item{alpha}{the confidence level of the test (default: 0.05)} \item{asymptotic}{calculate the bounds asymptotically (suitable for smaller datasets) (default: FALSE)} \item{replace}{use replace when sampling for computing the asymptotic bounds (default: TRUE)} \item{ntimes}{number of times repeating the sampling procedure (default: 150)} \item{frac}{fraction of points to sample (default: 1) } \item{\dots}{additional parameters.} } \details{\code{kmmd} calculates the kernel maximum mean discrepancy for samples from two distributions and conducts a test as to whether the samples are from different distributions with level \code{alpha}. } \value{ An S4 object of class \code{kmmd} containing the results of whether the H0 hypothesis is rejected or not. H0 being that the samples \eqn{x} and \eqn{y} come from the same distribution. The object contains the following slots : \item{\code{H0}}{is H0 rejected (logical)} \item{\code{AsympH0}}{is H0 rejected according to the asymptotic bound (logical)} \item{\code{kernelf}}{the kernel function used.} \item{\code{mmdstats}}{the test statistics (vector of two)} \item{\code{Radbound}}{the Rademacher bound} \item{\code{Asymbound}}{the asymptotic bound} see \code{kmmd-class} for more details. } \references{Gretton, A., K. Borgwardt, M. Rasch, B. Schoelkopf and A. Smola\cr \emph{A Kernel Method for the Two-Sample-Problem}\cr Neural Information Processing Systems 2006, Vancouver \cr \url{http://papers.nips.cc/paper/3110-a-kernel-method-for-the-two-sample-problem.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{ksvm}} \examples{ # create data x <- matrix(runif(300),100) y <- matrix(runif(300)+1,100) mmdo <- kmmd(x, y) mmdo } \keyword{htest} \keyword{nonlinear} \keyword{nonparametric} kernlab/man/as.kernelMatrix.Rd0000644000176000001440000000230411304023134016021 0ustar ripleyusers\name{as.kernelMatrix} \docType{methods} \alias{kernelMatrix-class} \alias{as.kernelMatrix} \alias{as.kernelMatrix-methods} \alias{as.kernelMatrix,matrix-method} \title{Assign kernelMatrix class to matrix objects} \description{\code{as.kernelMatrix} in package \pkg{kernlab} can be used to coerce matrix objects representing a kernel matrix to the kernelMatrix class. These matrices can then be used with the kernelMatrix interfaces which most of the functions in \pkg{kernlab} support.} \usage{ \S4method{as.kernelMatrix}{matrix}(x, center = FALSE) } \arguments{ \item{x}{matrix to be assigned the \code{kernelMatrix} class } \item{center}{center the kernel matrix in feature space (default: FALSE) } } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{kernelMatrix}}, \code{\link{dots}}} \keyword{methods} \examples{ ## Create toy data x <- rbind(matrix(rnorm(10),,2),matrix(rnorm(10,mean=3),,2)) y <- matrix(c(rep(1,5),rep(-1,5))) ### Use as.kernelMatrix to label the cov. matrix as a kernel matrix ### which is equivalent to using a linear kernel K <- as.kernelMatrix(crossprod(t(x))) K svp2 <- ksvm(K, y, type="C-svc") svp2
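## to predict on new data with a kernelMatrix-trained model, a kernel
## matrix between the new points and the support vectors is needed
## (a sketch; xtest is hypothetical new data)
xtest <- matrix(rnorm(10), , 2)
Ktest <- as.kernelMatrix(crossprod(t(xtest), t(x[SVindex(svp2), ])))
predict(svp2, Ktest)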
} kernlab/man/musk.Rd0000644000176000001440000000257011304023134013744 0ustar ripleyusers\name{musk} \alias{musk} \docType{data} \title{Musk data set} \description{ This dataset describes a set of 92 molecules of which 47 are judged by human experts to be musks and the remaining 45 molecules are judged to be non-musks. } \usage{data(musk)} \format{ A data frame with 476 observations on the following 167 variables. Variables 1-162 are "distance features" along rays. The distances are measured in hundredths of Angstroms. The distances may be negative or positive, since they are actually measured relative to an origin placed along each ray. The origin was defined by a "consensus musk" surface that is no longer used. Hence, any experiments with the data should treat these feature values as lying on an arbitrary continuous scale. In particular, the algorithm should not make any use of the zero point or the sign of each feature value. Variable 163 is the distance of the oxygen atom in the molecule to a designated point in 3-space. This is also called OXY-DIS. Variable 164 is the X-displacement from the designated point. Variable 165 is the Y-displacement from the designated point. Variable 166 is the Z-displacement from the designated point. Class: 0 for non-musk, and 1 for musk } \source{ UCI Machine Learning data repository \cr } \examples{ data(musk) muskm <- ksvm(Class~.,data=musk,kernel="rbfdot",C=1000) muskm } \keyword{datasets} kernlab/man/csi.Rd0000644000176000001440000001231012560414652013542 0ustar ripleyusers\name{csi} \docType{methods} \alias{csi} \alias{csi-methods} \alias{csi,matrix-method} \title{Cholesky decomposition with Side Information} \description{ The \code{csi} function in \pkg{kernlab} is an implementation of an incomplete Cholesky decomposition algorithm which exploits side information (e.g., classification labels, regression responses) to compute a low rank decomposition of a kernel matrix from the data. } \usage{ \S4method{csi}{matrix}(x, y, kernel="rbfdot", kpar=list(sigma=0.1), rank, centering = TRUE, kappa = 0.99 ,delta = 40 ,tol = 1e-5) } \arguments{ \item{x}{The data matrix indexed by row} \item{y}{the classification labels or regression responses. In classification, \eqn{y} is an \eqn{m \times n} matrix, where \eqn{m} is the number of data points and \eqn{n} the number of classes; the entry \eqn{y_{ij}} is 1 if the \eqn{i}-th point belongs to class \eqn{j}.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class \code{kernel}, which computes the inner product in feature space between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.
} \item{rank}{maximal rank of the computed kernel matrix} \item{centering}{if \code{TRUE} centering is performed (default: TRUE)} \item{kappa}{trade-off between approximation of K and prediction of Y (default: 0.99)} \item{delta}{number of columns of Cholesky performed in advance (default: 40)} \item{tol}{minimum gain at each iteration (default: 1e-5)} } \details{An incomplete Cholesky decomposition calculates \eqn{Z} where \eqn{K= ZZ'} \eqn{K} being the kernel matrix. Since the rank of a kernel matrix is usually low, \eqn{Z} tends to be smaller than the complete kernel matrix. The decomposed matrix can be used to create memory-efficient kernel-based algorithms without the need to compute and store a complete kernel matrix in memory. \cr \code{csi} uses the class labels or regression responses to compute a more appropriate approximation for the problem at hand considering the additional information from the response variable. } \value{ An S4 object of class "csi" which is an extension of the class "matrix". The object is the decomposed kernel matrix along with the slots : \item{pivots}{Indices on which pivots were done} \item{diagresidues}{Residuals left on the diagonal} \item{maxresiduals}{Residuals picked for pivoting} \item{predgain}{predicted gain before adding each column} \item{truegain}{actual gain after adding each column} \item{Q}{QR decomposition of the kernel matrix} \item{R}{QR decomposition of the kernel matrix} slots can be accessed either by \code{object@slot} or by accessor functions with the same name (e.g., \code{pivots(object))}} \references{ Francis R. Bach, Michael I. Jordan\cr \emph{Predictive low-rank decomposition for kernel methods.}\cr Proceedings of the Twenty-second International Conference on Machine Learning (ICML) 2005\cr \url{http://www.di.ens.fr/~fbach/bach_jordan_csi.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by Francis Bach)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{inchol}}, \code{\link{chol}}, \code{\link{csi-class}}} \examples{ data(iris) ## create multidimensional y matrix yind <- t(matrix(1:3,3,150)) ymat <- matrix(0, 150, 3) ymat[yind==as.integer(iris[,5])] <- 1 datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- csi(datamatrix,ymat, kernel=rbf, rank = 30) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{methods} \keyword{algebra} \keyword{array} kernlab/man/ksvm-class.Rd0000644000176000001440000001532112117364353015054 0ustar ripleyusers\name{ksvm-class} \docType{class} \alias{ksvm-class} \alias{SVindex} \alias{alphaindex} \alias{prob.model} \alias{scaling} \alias{prior} \alias{show} \alias{param} \alias{b} \alias{obj} \alias{nSV} \alias{coef,vm-method} \alias{SVindex,ksvm-method} \alias{alpha,ksvm-method} \alias{alphaindex,ksvm-method} \alias{cross,ksvm-method} \alias{error,ksvm-method} \alias{param,ksvm-method} \alias{fitted,ksvm-method} \alias{prior,ksvm-method} \alias{prob.model,ksvm-method} \alias{kernelf,ksvm-method} \alias{kpar,ksvm-method} \alias{lev,ksvm-method} \alias{kcall,ksvm-method} \alias{scaling,ksvm-method} \alias{type,ksvm-method} \alias{xmatrix,ksvm-method} \alias{ymatrix,ksvm-method} \alias{b,ksvm-method} \alias{obj,ksvm-method} \alias{nSV,ksvm-method} \title{Class "ksvm" } \description{An S4 class containing the output (model) of the \code{ksvm} Support Vector Machines function } \section{Objects from the
Class}{ Objects can be created by calls of the form \code{new("ksvm", ...)} or by calls to the \code{ksvm} function. } \section{Slots}{ \describe{ \item{\code{type}:}{Object of class \code{"character"} containing the support vector machine type ("C-svc", "nu-svc", "C-bsvc", "spoc-svc", "one-svc", "eps-svr", "nu-svr", "eps-bsvr")} \item{\code{param}:}{Object of class \code{"list"} containing the Support Vector Machine parameters (C, nu, epsilon)} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel function parameters (hyperparameters)} \item{\code{kcall}:}{Object of class \code{"ANY"} containing the \code{ksvm} function call} \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling information performed on the data} \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} (\code{"list"} for multiclass problems or \code{"matrix"} for binary classification and regression problems) containing the support vectors calculated from the data matrix used during computations (possibly scaled and without NA). In the case of multi-class classification each list entry contains the support vectors from each binary classification problem from the one-against-one method.} \item{\code{ymatrix}:}{Object of class \code{"output"} the response \code{"matrix"} or \code{"factor"} or \code{"vector"} or \code{"logical"}} \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, predictions using the training set.} \item{\code{lev}:}{Object of class \code{"vector"} with the levels of the response (in the case of classification)} \item{\code{prob.model}:}{Object of class \code{"list"} with the class prob. model} \item{\code{prior}:}{Object of class \code{"list"} with the prior of the training set} \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in the case of classification)} \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector (\code{"list"} or \code{"matrix"} in case of multiclass classification) (support vectors)} \item{\code{coef}:}{Object of class \code{"ANY"} containing the resulting coefficients} \item{\code{alphaindex}:}{Object of class \code{"list"} containing the indexes of the non-zero alphas (support vectors)} \item{\code{b}:}{Object of class \code{"numeric"} containing the resulting offset } \item{\code{SVindex}:}{Object of class \code{"vector"} containing the indexes of the support vectors} \item{\code{nSV}:}{Object of class \code{"numeric"} containing the number of support vectors } \item{\code{obj}:}{Object of class \code{vector} containing the value of the objective function.
When using one-against-one in multiclass classification this is a vector.} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross-validation error } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA } } } \section{Methods}{ \describe{ \item{SVindex}{\code{signature(object = "ksvm")}: return the indexes of support vectors} \item{alpha}{\code{signature(object = "ksvm")}: returns the complete alpha vector (with zero values)} \item{alphaindex}{\code{signature(object = "ksvm")}: returns the indexes of non-zero alphas (support vectors)} \item{cross}{\code{signature(object = "ksvm")}: returns the cross-validation error } \item{error}{\code{signature(object = "ksvm")}: returns the training error } \item{obj}{\code{signature(object = "ksvm")}: returns the value of the objective function} \item{fitted}{\code{signature(object = "vm")}: returns the fitted values (predict on training set) } \item{kernelf}{\code{signature(object = "ksvm")}: returns the kernel function} \item{kpar}{\code{signature(object = "ksvm")}: returns the kernel parameters (hyperparameters)} \item{lev}{\code{signature(object = "ksvm")}: returns the levels in case of classification } \item{prob.model}{\code{signature(object="ksvm")}: returns class prob. model values} \item{param}{\code{signature(object="ksvm")}: returns the parameters of the SVM in a list (C, epsilon, nu etc.)} \item{prior}{\code{signature(object="ksvm")}: returns the prior of the training set} \item{kcall}{\code{signature(object="ksvm")}: returns the \code{ksvm} function call} \item{scaling}{\code{signature(object = "ksvm")}: returns the scaling values } \item{show}{\code{signature(object = "ksvm")}: prints the object information} \item{type}{\code{signature(object = "ksvm")}: returns the problem type} \item{xmatrix}{\code{signature(object = "ksvm")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "ksvm")}: returns the response vector} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm}}, \code{\link{rvm-class}}, \code{\link{gausspr-class}} } \examples{ ## simple example using the promotergene data set data(promotergene) ## train a support vector machine gene <- ksvm(Class~.,data=promotergene,kernel="rbfdot", kpar=list(sigma=0.015),C=50,cross=4) gene # the kernel function kernelf(gene) # the alpha values alpha(gene) # the coefficients coef(gene) # the fitted values fitted(gene) # the cross validation error cross(gene)
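# further accessors (a short illustration): the number of support
# vectors and the value of the objective function
nSV(gene)
obj(gene)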
} \keyword{classes} kernlab/man/csi-class.Rd0000644000176000001440000000545411304023134014642 0ustar ripleyusers\name{csi-class} \docType{class} \alias{csi-class} \alias{Q} \alias{R} \alias{predgain} \alias{truegain} \alias{diagresidues,csi-method} \alias{maxresiduals,csi-method} \alias{pivots,csi-method} \alias{predgain,csi-method} \alias{truegain,csi-method} \alias{Q,csi-method} \alias{R,csi-method} \title{Class "csi"} \description{The reduced Cholesky decomposition object} \section{Objects from the Class}{Objects can be created by calls of the form \code{new("csi", ...)}, or by calling the \code{csi} function.} \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} contains the decomposed matrix} \item{\code{pivots}:}{Object of class \code{"vector"} contains the pivots performed} \item{\code{diagresidues}:}{Object of class \code{"vector"} contains the diagonal residuals} \item{\code{maxresiduals}:}{Object of class \code{"vector"} contains the maximum residuals} \item{predgain}{Object of class \code{"vector"} contains the predicted gain before adding each column} \item{truegain}{Object of class \code{"vector"} contains the actual gain after adding each column} \item{Q}{Object of class \code{"matrix"} contains Q from the QR decomposition of the kernel matrix} \item{R}{Object of class \code{"matrix"} contains R from the QR decomposition of the kernel matrix} } } \section{Extends}{ Class \code{"matrix"}, directly. } \section{Methods}{ \describe{ \item{diagresidues}{\code{signature(object = "csi")}: returns the diagonal residuals} \item{maxresiduals}{\code{signature(object = "csi")}: returns the maximum residuals} \item{pivots}{\code{signature(object = "csi")}: returns the pivots performed} \item{predgain}{\code{signature(object = "csi")}: returns the predicted gain before adding each column} \item{truegain}{\code{signature(object = "csi")}: returns the actual gain after adding each column} \item{Q}{\code{signature(object = "csi")}: returns Q from the QR decomposition of the kernel matrix} \item{R}{\code{signature(object = "csi")}: returns R from the QR decomposition of the kernel matrix} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{csi}}, \code{\link{inchol-class}}} \examples{ data(iris) ## create multidimensional y matrix yind <- t(matrix(1:3,3,150)) ymat <- matrix(0, 150, 3) ymat[yind==as.integer(iris[,5])] <- 1 datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- csi(datamatrix,ymat, kernel=rbf, rank = 30) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{classes} kernlab/man/vm-class.Rd0000644000176000001440000000732511304023134014505 0ustar ripleyusers\name{vm-class} \docType{class} \alias{vm-class} \alias{cross} \alias{alpha} \alias{error} \alias{type} \alias{kernelf} \alias{xmatrix} \alias{ymatrix} \alias{lev} \alias{kcall} \alias{alpha,vm-method} \alias{cross,vm-method} \alias{error,vm-method} \alias{fitted,vm-method} \alias{kernelf,vm-method} \alias{kpar,vm-method} \alias{lev,vm-method} \alias{kcall,vm-method} \alias{type,vm-method} \alias{xmatrix,vm-method} \alias{ymatrix,vm-method} \title{Class "vm" } \description{An S4 VIRTUAL class used as a base for the various vector machine classes in \pkg{kernlab}} \section{Objects from the Class}{ Objects from the class cannot be created directly but only contained in other classes.
} \section{Slots}{ \describe{ \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector (list in case of multiclass classification) (support vectors)} \item{\code{type}:}{Object of class \code{"character"} containing the vector machine type e.g., ("C-svc", "nu-svc", "C-bsvc", "spoc-svc", "one-svc", "eps-svr", "nu-svr", "eps-bsvr")} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel function parameters (hyperparameters)} \item{\code{kcall}:}{Object of class \code{"call"} containing the function call} \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} the data matrix used during computations (support vectors) (possibly scaled and without NA)} \item{\code{ymatrix}:}{Object of class \code{"output"} the response matrix/vector } \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, predictions using the training set.} \item{\code{lev}:}{Object of class \code{"vector"} with the levels of the response (in the case of classification)} \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in the case of classification)} \item{\code{error}:}{Object of class \code{"vector"} containing the training error} \item{\code{cross}:}{Object of class \code{"vector"} containing the cross-validation error } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "vm")}: returns the complete alpha vector (with zero values)} \item{cross}{\code{signature(object = "vm")}: returns the cross-validation error } \item{error}{\code{signature(object = "vm")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values (predict on training set) } \item{kernelf}{\code{signature(object = "vm")}: returns the kernel function} \item{kpar}{\code{signature(object = "vm")}: returns the kernel parameters (hyperparameters)} \item{lev}{\code{signature(object = "vm")}: returns the levels in case of classification } \item{kcall}{\code{signature(object="vm")}: returns the function call} \item{type}{\code{signature(object = "vm")}: returns the problem type} \item{xmatrix}{\code{signature(object = "vm")}: returns the data matrix used (support vectors)} \item{ymatrix}{\code{signature(object = "vm")}: returns the response vector} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm-class}}, \code{\link{rvm-class}}, \code{\link{gausspr-class}} } \keyword{classes} kernlab/man/ticdata.Rd0000644000176000001440000002013411304023134014364 0ustar ripleyusers\name{ticdata} \alias{ticdata} \title{The Insurance Company Data} \description{ This data set used in the CoIL 2000 Challenge contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why? } \usage{data(ticdata)} \format{ ticdata: Dataset to train and validate prediction models and build a description (9822 customer records).
Each record consists of 86 attributes, containing sociodemographic data (attributes 1-43) and product ownership (attributes 44-86). The sociodemographic data is derived from zip codes. All customers living in areas with the same zip code have the same sociodemographic attributes. Attribute 86, \code{CARAVAN:Number of mobile home policies}, is the target variable. Data Format \tabular{rlll}{ \tab 1 \tab \code{STYPE} \tab Customer Subtype\cr \tab 2 \tab \code{MAANTHUI} \tab Number of houses 1 - 10\cr \tab 3 \tab \code{MGEMOMV} \tab Avg size household 1 - 6\cr \tab 4 \tab \code{MGEMLEEF} \tab Average age\cr \tab 5 \tab \code{MOSHOOFD} \tab Customer main type\cr \tab 6 \tab \code{MGODRK} \tab Roman catholic \cr \tab 7 \tab \code{MGODPR} \tab Protestant ... \cr \tab 8 \tab \code{MGODOV} \tab Other religion \cr \tab 9 \tab \code{MGODGE} \tab No religion \cr \tab 10 \tab \code{MRELGE} \tab Married \cr \tab 11 \tab \code{MRELSA} \tab Living together \cr \tab 12 \tab \code{MRELOV} \tab Other relation \cr \tab 13 \tab \code{MFALLEEN} \tab Singles \cr \tab 14 \tab \code{MFGEKIND} \tab Household without children \cr \tab 15 \tab \code{MFWEKIND} \tab Household with children \cr \tab 16 \tab \code{MOPLHOOG} \tab High level education \cr \tab 17 \tab \code{MOPLMIDD} \tab Medium level education \cr \tab 18 \tab \code{MOPLLAAG} \tab Lower level education \cr \tab 19 \tab \code{MBERHOOG} \tab High status \cr \tab 20 \tab \code{MBERZELF} \tab Entrepreneur \cr \tab 21 \tab \code{MBERBOER} \tab Farmer \cr \tab 22 \tab \code{MBERMIDD} \tab Middle management \cr \tab 23 \tab \code{MBERARBG} \tab Skilled labourers \cr \tab 24 \tab \code{MBERARBO} \tab Unskilled labourers \cr \tab 25 \tab \code{MSKA} \tab Social class A \cr \tab 26 \tab \code{MSKB1} \tab Social class B1 \cr \tab 27 \tab \code{MSKB2} \tab Social class B2 \cr \tab 28 \tab \code{MSKC} \tab Social class C \cr \tab 29 \tab \code{MSKD} \tab Social class D \cr \tab 30 \tab \code{MHHUUR} \tab Rented house \cr \tab 31 \tab \code{MHKOOP} \tab Home owners \cr \tab 32 \tab \code{MAUT1} \tab 1 car \cr \tab 33 \tab \code{MAUT2} \tab 2 cars \cr \tab 34 \tab \code{MAUT0} \tab No car \cr \tab 35 \tab \code{MZFONDS} \tab National Health Service \cr \tab 36 \tab \code{MZPART} \tab Private health insurance \cr \tab 37 \tab \code{MINKM30} \tab Income < 30.000 \cr \tab 38 \tab \code{MINK3045} \tab Income 30-45.000 \cr \tab 39 \tab \code{MINK4575} \tab Income 45-75.000 \cr \tab 40 \tab \code{MINK7512} \tab Income 75-122.000 \cr \tab 41 \tab \code{MINK123M} \tab Income > 123.000 \cr \tab 42 \tab \code{MINKGEM} \tab Average income \cr \tab 43 \tab \code{MKOOPKLA} \tab Purchasing power class \cr \tab 44 \tab \code{PWAPART} \tab Contribution private third party insurance \cr \tab 45 \tab \code{PWABEDR} \tab Contribution third party insurance (firms) \cr \tab 46 \tab \code{PWALAND} \tab Contribution third party insurance (agriculture) \cr \tab 47 \tab \code{PPERSAUT} \tab Contribution car policies \cr \tab 48 \tab \code{PBESAUT} \tab Contribution delivery van policies \cr \tab 49 \tab \code{PMOTSCO} \tab Contribution motorcycle/scooter policies \cr \tab 50 \tab \code{PVRAAUT} \tab Contribution lorry policies \cr \tab 51 \tab \code{PAANHANG} \tab Contribution trailer policies \cr \tab 52 \tab \code{PTRACTOR} \tab Contribution tractor policies \cr \tab 53 \tab \code{PWERKT} \tab Contribution agricultural machines policies \cr \tab 54 \tab \code{PBROM} \tab Contribution moped policies \cr \tab 55 \tab \code{PLEVEN} \tab Contribution life insurances \cr \tab 56 \tab \code{PPERSONG}
\tab Contribution private accident insurance policies \cr \tab 57 \tab \code{PGEZONG} \tab Contribution family accidents insurance policies \cr \tab 58 \tab \code{PWAOREG} \tab Contribution disability insurance policies \cr \tab 59 \tab \code{PBRAND} \tab Contribution fire policies \cr \tab 60 \tab \code{PZEILPL} \tab Contribution surfboard policies \cr \tab 61 \tab \code{PPLEZIER} \tab Contribution boat policies \cr \tab 62 \tab \code{PFIETS} \tab Contribution bicycle policies \cr \tab 63 \tab \code{PINBOED} \tab Contribution property insurance policies \cr \tab 64 \tab \code{PBYSTAND} \tab Contribution social security insurance policies \cr \tab 65 \tab \code{AWAPART} \tab Number of private third party insurance 1 - 12 \cr \tab 66 \tab \code{AWABEDR} \tab Number of third party insurance (firms) ... \cr \tab 67 \tab \code{AWALAND} \tab Number of third party insurance (agriculture) \cr \tab 68 \tab \code{APERSAUT} \tab Number of car policies \cr \tab 69 \tab \code{ABESAUT} \tab Number of delivery van policies \cr \tab 70 \tab \code{AMOTSCO} \tab Number of motorcycle/scooter policies \cr \tab 71 \tab \code{AVRAAUT} \tab Number of lorry policies \cr \tab 72 \tab \code{AAANHANG} \tab Number of trailer policies \cr \tab 73 \tab \code{ATRACTOR} \tab Number of tractor policies \cr \tab 74 \tab \code{AWERKT} \tab Number of agricultural machines policies \cr \tab 75 \tab \code{ABROM} \tab Number of moped policies \cr \tab 76 \tab \code{ALEVEN} \tab Number of life insurances \cr \tab 77 \tab \code{APERSONG} \tab Number of private accident insurance policies \cr \tab 78 \tab \code{AGEZONG} \tab Number of family accidents insurance policies \cr \tab 79 \tab \code{AWAOREG} \tab Number of disability insurance policies \cr \tab 80 \tab \code{ABRAND} \tab Number of fire policies \cr \tab 81 \tab \code{AZEILPL} \tab Number of surfboard policies \cr \tab 82 \tab \code{APLEZIER} \tab Number of boat policies \cr \tab 83 \tab \code{AFIETS} \tab Number of bicycle policies \cr \tab 84 \tab \code{AINBOED} \tab Number of property insurance policies \cr \tab 85 \tab \code{ABYSTAND} \tab Number of social security insurance policies \cr \tab 86 \tab \code{CARAVAN} \tab Number of mobile home policies 0 - 1 \cr } Note: All the variables starting with M are zipcode variables. They give information on the distribution of that variable, e.g., Rented house, in the zipcode area of the customer. } \details{ Information about the insurance company customers consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was supplied by the Dutch data mining company Sentient Machine Research and is based on a real world business problem. The training set contains over 5000 descriptions of customers, including the information of whether or not they have a caravan insurance policy. The test set contains 4000 customers. The training and test sets are merged in the ticdata set. More information about the data set and the CoIL 2000 Challenge along with publications based on the data set can be found at \url{http://www.liacs.nl/~putten/library/cc2000/}.
} \source{ \itemize{ \item UCI KDD Archive: \url{http://kdd.ics.uci.edu} \item Donor: Sentient Machine Research \cr Peter van der Putten \cr Sentient Machine Research \cr Baarsjesweg 224 \cr 1058 AA Amsterdam \cr The Netherlands \cr +31 20 6186927 \cr pvdputten@hotmail.com, putten@liacs.nl } } \references{Peter van der Putten, Michel de Ruiter, Maarten van Someren \emph{CoIL Challenge 2000 Tasks and Results: Predicting and Explaining Caravan Policy Ownership}\cr \url{http://www.liacs.nl/~putten/library/cc2000/}} \keyword{datasets} kernlab/man/inlearn.Rd0000644000176000001440000000600712117362575014426 0ustar ripleyusers\name{inlearn} \alias{inlearn} \alias{inlearn,numeric-method} \title{Onlearn object initialization} \description{ Initialization function for the online kernel algorithm object \code{onlearn}. } \usage{ \S4method{inlearn}{numeric}(d, kernel = "rbfdot", kpar = list(sigma = 0.1), type = "novelty", buffersize = 1000) } \arguments{ \item{d}{the dimensionality of the data to be learned} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well.} \item{type}{the type of problem to be learned by the online algorithm: \code{classification}, \code{regression}, \code{novelty}} \item{buffersize}{the size of the buffer to be used} } \details{ The \code{inlearn} function is used to initialize a blank \code{onlearn} object. } \value{ The function returns an \code{S4} object of class \code{onlearn} that can be used by the \code{onlearn} function.
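For example, a blank object for an online regression problem on one-dimensional data could be initialized as follows (a minimal sketch; the hyper-parameter and buffer values are illustrative only):
\preformatted{
## blank onlearn object for online regression in one dimension;
## sigma and buffersize are arbitrary illustrative choices
onr <- inlearn(1, kernel = "rbfdot", kpar = list(sigma = 0.5),
               type = "regression", buffersize = 100)
}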
} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{onlearn}}, \code{\link{onlearn-class}} } \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2), type = "classification") ## learn one data point at a time for(i in sample(1:100,100)) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classif} \keyword{neural} \keyword{regression} \keyword{ts} kernlab/man/prc-class.Rd0000644000176000001440000000353311304023134014646 0ustar ripleyusers\name{prc-class} \docType{class} \alias{prc-class} \alias{eig} \alias{pcv} \alias{eig,prc-method} \alias{kcall,prc-method} \alias{kernelf,prc-method} \alias{pcv,prc-method} \alias{xmatrix,prc-method} \title{Class "prc"} \description{Principal Components Class} \section{Objects of class "prc"}{Objects of this class cannot be created directly; they only occur contained within other classes.} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding eigenvalues} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "prc")}: returns the eigenvalues } \item{kcall}{\code{signature(object = "prc")}: returns the performed call} \item{kernelf}{\code{signature(object = "prc")}: returns the used kernel function} \item{pcv}{\code{signature(object = "prc")}: returns the principal component vectors } \item{predict}{\code{signature(object = "prc")}: embeds new data } \item{xmatrix}{\code{signature(object = "prc")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kpca-class}},\code{\link{kha-class}}, \code{\link{kfa-class}} } \keyword{classes} kernlab/man/promotergene.Rd0000644000176000001440000000310712117365235015476 0ustar ripleyusers\name{promotergene} \alias{promotergene} \docType{data} \title{E. coli promoter gene sequences (DNA)} \description{ Promoters have a region where a protein (RNA polymerase) must make contact and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters. } \usage{data(promotergene)} \format{ A data frame with 106 observations and 58 variables. The first variable \code{Class} is a factor with levels \code{+} for a promoter gene and \code{-} for a non-promoter gene. The remaining 57 variables \code{V2 to V58} are factors describing the sequence. The DNA bases are coded as follows: \code{a} adenine \code{c} cytosine \code{g} guanine \code{t} thymine } \source{ UCI Machine Learning data repository \cr \url{ftp://ftp.ics.uci.edu/pub/machine-learning-databases/molecular-biology/promoter-gene-sequences} } \references{ Towell, G., Shavlik, J. and Noordewier, M.
\cr \emph{Refinement of Approximate Domain Theories by Knowledge-Based Artificial Neural Networks.} \cr In Proceedings of the Eighth National Conference on Artificial Intelligence (AAAI-90) } \examples{ data(promotergene) ## Create classification model using Gaussian Processes prom <- gausspr(Class~.,data=promotergene,kernel="rbfdot", kpar=list(sigma=0.02),cross=4) prom ## Create model using Support Vector Machines promsv <- ksvm(Class~.,data=promotergene,kernel="laplacedot", kpar="automatic",C=60,cross=4) promsv } \keyword{datasets} kernlab/man/stringdot.Rd0000644000176000001440000000631111304023134014777 0ustar ripleyusers\name{stringdot} \alias{stringdot} \title{String Kernel Functions} \description{ String kernels. } \usage{ stringdot(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE) } \arguments{ \item{length}{The length of the substrings considered} \item{lambda}{The decay factor} \item{type}{Type of string kernel, currently the following kernels are supported : \cr \code{spectrum} the kernel considers only matching substrings of exactly length \eqn{n} (also known as the string kernel). Each such matching substring is given a constant weight. The length parameter in this kernel has to be \eqn{length > 1}.\cr \code{boundrange} this kernel considers only matching substrings of length less than or equal to a given number N. This type of string kernel requires a length parameter \eqn{length > 1}\cr \code{constant} The kernel considers all matching substrings and assigns constant weight (e.g. 1) to each of them. This \code{constant} kernel does not require any additional parameter.\cr \code{exponential} Exponential Decay kernel where the substring weight decays as the matching substring gets longer. The kernel requires a decay factor \eqn{ \lambda > 1}\cr \code{string} essentially identical to the spectrum kernel, only computed in a more conventional way.\cr \code{fullstring} essentially identical to the boundrange kernel, only computed in a more conventional way. \cr } \item{normalized}{normalize string kernel values, (default: \code{TRUE})} } \details{ The kernel generating functions are used to initialize a kernel function which calculates the dot (inner) product between two feature vectors in a Hilbert Space. These functions or their function generating names can be passed as a \code{kernel} argument on almost all functions in \pkg{kernlab} (e.g., \code{ksvm}, \code{kpca}, etc.). The string kernels calculate similarities between two strings (e.g. texts or sequences) by matching the common substrings in the strings. Different types of string kernels exist and are mainly distinguished by how the matching is performed, i.e., some string kernels count the exact matches of \eqn{n} characters (spectrum kernel) between the strings, others allow gaps (mismatch kernel), etc. } \value{ Returns an S4 object of class \code{stringkernel} which extends the \code{function} class. The resulting function implements the given kernel calculating the inner (dot) product between two character vectors. \item{kpar}{a list containing the kernel parameters (hyperparameters) used.} The kernel parameters can be accessed by the \code{kpar} function. } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \note{ The \code{spectrum} and \code{boundrange} kernels are faster and more efficient implementations of the \code{string} and \code{fullstring} kernels, which will still be included in \code{kernlab} for the next two versions.
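Since kernel objects are callable functions, this equivalence can be checked directly (a minimal sketch; the example strings are arbitrary, and since both kernels are normalized by default the two values should agree):
\preformatted{
s1 <- "kernel methods"
s2 <- "kernel machines"
stringdot(type = "spectrum", length = 4)(s1, s2)
stringdot(type = "string", length = 4)(s1, s2)
}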
} \seealso{ \code{\link{dots}}, \code{\link{kernelMatrix}}, \code{\link{kernelMult}}, \code{\link{kernelPol}}} \examples{ sk <- stringdot(type="string", length=5) sk } \keyword{symbolmath} kernlab/man/ranking.Rd0000644000176000001440000001251512117365427014427 0ustar ripleyusers\name{ranking} \alias{ranking} \alias{ranking,matrix-method} \alias{ranking,list-method} \alias{ranking,kernelMatrix-method} \title{Ranking} \description{ A universal ranking algorithm which assigns importance/ranking to data points given a query. } \usage{ \S4method{ranking}{matrix}(x, y, kernel = "rbfdot", kpar = list(sigma = 1), scale = FALSE, alpha = 0.99, iterations = 600, edgegraph = FALSE, convergence = FALSE, ...) \S4method{ranking}{kernelMatrix}(x, y, alpha = 0.99, iterations = 600, convergence = FALSE, ...) \S4method{ranking}{list}(x, y, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 0.99, iterations = 600, convergence = FALSE, ...) } \arguments{ \item{x}{a matrix containing the data to be ranked, or the kernel matrix of data to be ranked or a list of character vectors} \item{y}{The index of the query point in the data matrix, or a vector of length equal to the number of rows of the data matrix with a one at the position of the query point and zeros at all other points.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{scale}{If TRUE the data matrix columns are scaled to zero mean and unit variance.} \item{alpha}{ The \code{alpha} parameter takes values between 0 and 1 and is used to control the authoritative scores received from the unlabeled points. For 0, no global structure is found and the algorithm ranks the points similarly to the original distance metric.} \item{iterations}{Maximum number of iterations} \item{edgegraph}{Construct edgegraph (only supported with the RBF kernel)} \item{convergence}{Include convergence matrix in results} \item{\dots}{Additional arguments} } \details{ A simple universal ranking algorithm which exploits the intrinsic global geometric structure of the data.
In many real world applications this should be superior to a local method in which the data are simply ranked by pairwise Euclidean distances. First, a weighted network is defined on the data and an authoritative score is assigned to each query point. The query points act as source nodes that continually pump their authoritative scores to the remaining points via the weighted network, and the remaining points further spread the scores they received to their neighbors. This spreading process is repeated until convergence, and the points are ranked according to their score at the end of the iterations. } \value{ An S4 object of class \code{ranking} which extends the \code{matrix} class. The first column of the returned matrix contains the original index of the points in the data matrix, the second column contains the final score received by each point, and the third column the ranking of the point. The object contains the following slots: \item{edgegraph}{Containing the edgegraph of the data points. } \item{convergence}{Containing the convergence matrix} } \references{ D. Zhou, J. Weston, A. Gretton, O. Bousquet, B. Schoelkopf \cr \emph{Ranking on Data Manifolds}\cr Advances in Neural Information Processing Systems 16.\cr MIT Press Cambridge Mass. 2004 \cr \url{http://www.kyb.mpg.de/publications/pdfs/pdf2334.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ranking-class}}, \code{\link{specc}} } \examples{ data(spirals) ## create data from spirals ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,] ## rank points according to similarity to the most upper left point ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE) ranked[54, 2] <- max(ranked[-54, 2]) c <- 1:86 op <- par(mfrow = c(1, 2),pty="s") plot(ran) plot(ran, cex=c[ranked[,3]]/40) } \keyword{cluster} \keyword{classif} kernlab/man/dots.Rd0000644000176000001440000001005711304023134013735 0ustar ripleyusers\name{dots} \alias{dots} \alias{kernels} \alias{rbfdot} \alias{polydot} \alias{tanhdot} \alias{vanilladot} \alias{laplacedot} \alias{besseldot} \alias{anovadot} \alias{fourierdot} \alias{splinedot} \alias{kpar} \alias{kfunction} \alias{show,kernel-method} \title{Kernel Functions} \description{ The kernel generating functions provided in kernlab. \cr The Gaussian RBF kernel \eqn{k(x,x') = \exp(-\sigma \|x - x'\|^2)} \cr The Polynomial kernel \eqn{k(x,x') = (scale <x, x'> + offset)^{degree}}\cr The Linear kernel \eqn{k(x,x') = <x, x'>}\cr The Hyperbolic tangent kernel \eqn{k(x, x') = \tanh(scale <x, x'> + offset)}\cr The Laplacian kernel \eqn{k(x,x') = \exp(-\sigma \|x - x'\|)} \cr The Bessel kernel \eqn{k(x,x') = (- Bessel_{(\nu+1)}^n \sigma \|x - x'\|^2)} \cr The ANOVA RBF kernel \eqn{k(x,x') = \sum_{1 \leq i_1 < \ldots < i_D \leq N} \prod_{d=1}^D k(x_{id}, {x'}_{id})} where k(x,x') is a Gaussian RBF kernel. \cr The Spline kernel \eqn{ \prod_{d=1}^D \left(1 + x_i x_j + x_i x_j min(x_i, x_j) - \frac{x_i + x_j}{2} min(x_i,x_j)^2 + \frac{min(x_i,x_j)^3}{3}\right)} \cr The String kernels (see \code{stringdot}). } \usage{ rbfdot(sigma = 1) polydot(degree = 1, scale = 1, offset = 1) tanhdot(scale = 1, offset = 1) vanilladot() laplacedot(sigma = 1) besseldot(sigma = 1, order = 1, degree = 1) anovadot(sigma = 1, degree = 1) splinedot() } \arguments{ \item{sigma}{The inverse kernel width used by the Gaussian, the Laplacian, the Bessel and the ANOVA kernels } \item{degree}{The degree of the polynomial, Bessel or ANOVA kernel function.
This has to be a positive integer.} \item{scale}{The scaling parameter of the polynomial and tangent kernel is a convenient way of normalizing patterns without the need to modify the data itself} \item{offset}{The offset used in a polynomial or hyperbolic tangent kernel} \item{order}{The order of the Bessel function to be used as a kernel} } \details{ The kernel generating functions are used to initialize a kernel function which calculates the dot (inner) product between two feature vectors in a Hilbert Space. These functions can be passed as a \code{kernel} argument on almost all functions in \pkg{kernlab} (e.g., \code{ksvm}, \code{kpca}, etc.). Although using one of the existing kernel functions as a \code{kernel} argument in various functions in \pkg{kernlab} has the advantage that optimized code is used to calculate various kernel expressions, any other function implementing a dot product of class \code{kernel} can also be used as a kernel argument. This allows the user to use, test and develop special kernels for a given data set or algorithm. For details on the string kernels see \code{stringdot}. } \value{ Returns an S4 object of class \code{kernel} which extends the \code{function} class. The resulting function implements the given kernel calculating the inner (dot) product between two vectors. \item{kpar}{a list containing the kernel parameters (hyperparameters) used.} The kernel parameters can be accessed by the \code{kpar} function. } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \note{If the offset in the Polynomial kernel is set to 0, we obtain homogeneous polynomial kernels; for positive values, we have inhomogeneous kernels. Note that for negative values the kernel does not satisfy Mercer's condition and thus the optimizers may fail. \cr In the Hyperbolic tangent kernel, if the offset is negative the likelihood of obtaining a kernel matrix that is not positive definite is much higher (since then even some diagonal elements may be negative), hence if this kernel has to be used, the offset should always be positive. Note, however, that this is no guarantee that the kernel will be positive. } \seealso{\code{stringdot}, \code{\link{kernelMatrix}}, \code{\link{kernelMult}}, \code{\link{kernelPol}}} \examples{ rbfkernel <- rbfdot(sigma = 0.1) rbfkernel kpar(rbfkernel) ## create two vectors x <- rnorm(10) y <- rnorm(10) ## calculate dot product rbfkernel(x,y) } \keyword{symbolmath} kernlab/man/kmmd-class.Rd0000644000176000001440000000415311304023134015011 0ustar ripleyusers\name{kmmd-class} \docType{class} \alias{kmmd-class} \alias{kernelf,kmmd-method} \alias{H0,kmmd-method} \alias{AsympH0,kmmd-method} \alias{Radbound,kmmd-method} \alias{Asymbound,kmmd-method} \alias{mmdstats,kmmd-method} \title{Class "kmmd"} \description{The Kernel Maximum Mean Discrepancy object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kmmd", ...)}
or by calling the \code{kmmd} function. } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{xmatrix}:}{Object of class \code{"kernelMatrix"} containing the data used } \item{\code{H0}:}{Object of class \code{"logical"} indicating whether H0 is rejected} \item{\code{AsympH0}:}{Object of class \code{"logical"} indicating whether H0 is rejected according to the asymptotic bound} \item{\code{mmdstats}:}{Object of class \code{"vector"} containing the test statistics (vector of two)} \item{\code{Radbound}:}{Object of class \code{"numeric"} containing the Rademacher bound} \item{\code{Asymbound}:}{Object of class \code{"numeric"} containing the asymptotic bound} } } \section{Methods}{ \describe{ \item{kernelf}{\code{signature(object = "kmmd")}: returns the kernel function used} \item{H0}{\code{signature(object = "kmmd")}: returns the value of H0 being rejected} \item{AsympH0}{\code{signature(object = "kmmd")}: returns the value of H0 being rejected according to the asymptotic bound} \item{mmdstats}{\code{signature(object = "kmmd")}: returns the values of the mmd statistics} \item{Radbound}{\code{signature(object = "kmmd")}: returns the value of the Rademacher bound} \item{Asymbound}{\code{signature(object = "kmmd")}: returns the value of the asymptotic bound} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kmmd}} } \examples{ # create data x <- matrix(runif(300),100) y <- matrix(runif(300)+1,100) mmdo <- kmmd(x, y) H0(mmdo) } \keyword{classes} kernlab/man/kha-class.Rd0000644000176000001440000000450312117362716014640 0ustar ripleyusers\name{kha-class} \docType{class} \alias{kha-class} \alias{eig,kha-method} \alias{kcall,kha-method} \alias{kernelf,kha-method} \alias{pcv,kha-method} \alias{xmatrix,kha-method} \alias{eskm,kha-method} \title{Class "kha"} \description{ The Kernel Hebbian Algorithm class} \section{Objects of class "kha"}{ Objects can be created by calls of the form \code{new("kha", ...)} or by calling the \code{kha} function.
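For example (a minimal sketch; the parameter values are illustrative and mirror the full example at the end of this page):
\preformatted{
data(iris)
kpc <- kha(~., data = iris[, -5], kernel = "rbfdot",
           kpar = list(sigma = 0.2), features = 2,
           eta = 0.001, maxiter = 65)
is(kpc, "kha")
}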
} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding normalization values} \item{\code{eskm}:}{Object of class \code{"vector"} containing the kernel sum} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "kha")}: returns the normalization values } \item{kcall}{\code{signature(object = "kha")}: returns the performed call} \item{kernelf}{\code{signature(object = "kha")}: returns the used kernel function} \item{pcv}{\code{signature(object = "kha")}: returns the principal component vectors } \item{eskm}{\code{signature(object = "kha")}: returns the kernel sum} \item{predict}{\code{signature(object = "kha")}: embeds new data } \item{xmatrix}{\code{signature(object = "kha")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kha}}, \code{\link{ksvm-class}}, \code{\link{kcca-class}} } \examples{ ## another example using the iris data data(iris) test <- sample(1:50,20) kpc <- kha(~.,data=iris[-test,-5], kernel="rbfdot", kpar=list(sigma=0.2),features=2, eta=0.001, maxiter=65) ## print the principal component vectors pcv(kpc) kernelf(kpc) eig(kpc) } \keyword{classes} kernlab/man/rvm.Rd0000644000176000001440000001565412117366150013601 0ustar ripleyusers\name{rvm} \alias{rvm} \alias{rvm-methods} \alias{rvm,formula-method} \alias{rvm,list-method} \alias{rvm,vector-method} \alias{rvm,kernelMatrix-method} \alias{rvm,matrix-method} \alias{show,rvm-method} \alias{predict,rvm-method} \alias{coef,rvm-method} \title{Relevance Vector Machine} \description{ The Relevance Vector Machine is a Bayesian model for regression and classification of identical functional form to the support vector machine. The \code{rvm} function currently supports only regression. } \usage{ \S4method{rvm}{formula}(x, data=NULL, ..., subset, na.action = na.omit) \S4method{rvm}{vector}(x, ...) \S4method{rvm}{matrix}(x, y, type="regression", kernel="rbfdot", kpar="automatic", alpha= ncol(as.matrix(x)), var=0.1, var.fix=FALSE, iterations=100, verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... , subset, na.action = na.omit) \S4method{rvm}{list}(x, y, type = "regression", kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 5, var = 0.1, var.fix = FALSE, iterations = 100, verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). Note that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model.
By default the variables are taken from the environment which `rvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{type}{\code{rvm} can only be used for regression at the moment.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel from the data (default = "automatic").} \item{alpha}{The initial alpha vector. Can be either a vector of length equal to the number of data points or a single number.} \item{var}{the initial noise variance} \item{var.fix}{Keep the noise variance fixed during iterations (default: FALSE)} \item{iterations}{Number of iterations allowed (default: 100)} \item{tol}{tolerance of termination criterion} \item{minmaxdiff}{termination criterion; stop when the maximum difference falls below this parameter (default: 1e-3) } \item{verbosity}{print information on algorithm convergence (default = FALSE)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: TRUE)} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable.
An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{The Relevance Vector Machine typically leads to sparser models than the SVM. It also performs better in many cases (especially in regression). } \value{ An S4 object of class "rvm" containing the fitted model. Accessor functions can be used to access the slots of the object which include: \item{alpha}{The resulting relevance vectors} \item{alphaindex}{ The index of the resulting relevance vectors in the data matrix} \item{nRV}{Number of relevance vectors} \item{RVindex}{The indexes of the relevance vectors} \item{error}{Training error (if \code{fit = TRUE})} ... } \references{ Tipping, M. E.\cr \emph{Sparse Bayesian learning and the relevance vector machine}\cr Journal of Machine Learning Research 1, 211-244\cr \url{http://www.jmlr.org/papers/volume1/tipping01a/tipping01a.pdf} } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm}}} \examples{ # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.05) # train relevance vector machine foo <- rvm(x, y) foo # print relevance vectors alpha(foo) RVindex(foo) # predict and plot ytest <- predict(foo, x) plot(x, y, type ="l") lines(x, ytest, col="red") } \keyword{regression} \keyword{nonlinear} kernlab/man/spam.Rd0000644000176000001440000000412711304023134013715 0ustar ripleyusers\name{spam} \alias{spam} \title{Spam E-mail Database} \description{A data set collected at Hewlett-Packard Labs that classifies 4601 e-mails as spam or non-spam. In addition to this class label there are 57 variables indicating the frequency of certain words and characters in the e-mail.} \usage{data(spam)} \format{A data frame with 4601 observations and 58 variables. The first 48 variables contain the frequency of the variable name (e.g., business) in the e-mail. If the variable name starts with num (e.g., num650) then it indicates the frequency of the corresponding number (e.g., 650). The variables 49-54 indicate the frequency of the characters `;', `(', `[', `!', `\$', and `\#'. The variables 55-57 contain the average, longest and total run-length of capital letters. Variable 58 indicates the type of the mail and is either \code{"nonspam"} or \code{"spam"}, i.e. unsolicited commercial e-mail.} \details{ The data set contains 2788 e-mails classified as \code{"nonspam"} and 1813 classified as \code{"spam"}. The ``spam'' concept is diverse: advertisements for products/web sites, make money fast schemes, chain letters, pornography... This collection of spam e-mails came from the collectors' postmaster and individuals who had filed spam. The collection of non-spam e-mails came from filed work and personal e-mails, and hence the word 'george' and the area code '650' are indicators of non-spam. These are useful when constructing a personalized spam filter. One would either have to blind such non-spam indicators or get a very wide collection of non-spam to generate a general purpose spam filter. } \source{ \itemize{ \item Creators: Mark Hopkins, Erik Reeber, George Forman, Jaap Suermondt at Hewlett-Packard Labs, 1501 Page Mill Rd., Palo Alto, CA 94304 \item Donor: George Forman (gforman at nospam hpl.hp.com) 650-857-7835 } These data have been taken from the UCI Repository Of Machine Learning Databases at \url{http://www.ics.uci.edu/~mlearn/MLRepository.html}} \references{ T. Hastie, R. Tibshirani, J.H. Friedman.
\emph{The Elements of Statistical Learning.} Springer, 2001. } \keyword{datasets} kernlab/man/kernelMatrix.Rd0000644000176000001440000001254111304023134015423 0ustar ripleyusers\name{kernelMatrix} \alias{kernelMatrix} \alias{kernelMult} \alias{kernelPol} \alias{kernelFast} \alias{kernelPol,kernel-method} \alias{kernelMatrix,kernel-method} \alias{kernelMult,kernel-method} \alias{kernelFast,kernel-method} \alias{kernelMatrix,rbfkernel-method} \alias{kernelMatrix,polykernel-method} \alias{kernelMatrix,vanillakernel-method} \alias{kernelMatrix,tanhkernel-method} \alias{kernelMatrix,laplacekernel-method} \alias{kernelMatrix,anovakernel-method} \alias{kernelMatrix,splinekernel-method} \alias{kernelMatrix,besselkernel-method} \alias{kernelMatrix,stringkernel-method} \alias{kernelMult,rbfkernel,ANY-method} \alias{kernelMult,splinekernel,ANY-method} \alias{kernelMult,polykernel,ANY-method} \alias{kernelMult,tanhkernel,ANY-method} \alias{kernelMult,laplacekernel,ANY-method} \alias{kernelMult,besselkernel,ANY-method} \alias{kernelMult,anovakernel,ANY-method} \alias{kernelMult,vanillakernel,ANY-method} \alias{kernelMult,character,kernelMatrix-method} \alias{kernelMult,stringkernel,ANY-method} \alias{kernelPol,rbfkernel-method} \alias{kernelPol,splinekernel-method} \alias{kernelPol,polykernel-method} \alias{kernelPol,tanhkernel-method} \alias{kernelPol,vanillakernel-method} \alias{kernelPol,anovakernel-method} \alias{kernelPol,besselkernel-method} \alias{kernelPol,laplacekernel-method} \alias{kernelPol,stringkernel-method} \alias{kernelFast,rbfkernel-method} \alias{kernelFast,splinekernel-method} \alias{kernelFast,polykernel-method} \alias{kernelFast,tanhkernel-method} \alias{kernelFast,vanillakernel-method} \alias{kernelFast,anovakernel-method} \alias{kernelFast,besselkernel-method} \alias{kernelFast,laplacekernel-method} \alias{kernelFast,stringkernel-method} \title{Kernel Matrix functions} \description{ \code{kernelMatrix} calculates the kernel matrix \eqn{K_{ij} = k(x_i,x_j)} or \eqn{K_{ij} = k(x_i,y_j)}.\cr \code{kernelPol} computes the quadratic kernel expression \eqn{H = z_i z_j k(x_i,x_j)}, \eqn{H = z_i k_j k(x_i,y_j)}.\cr \code{kernelMult} calculates the kernel expansion \eqn{f(x_j) = \sum_{i=1}^m z_i k(x_i,x_j)}\cr \code{kernelFast} computes the kernel matrix, identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input, useful in iterative kernel matrix calculations. } \usage{ \S4method{kernelMatrix}{kernel}(kernel, x, y = NULL) \S4method{kernelPol}{kernel}(kernel, x, y = NULL, z, k = NULL) \S4method{kernelMult}{kernel}(kernel, x, y = NULL, z, blocksize = 256) \S4method{kernelFast}{kernel}(kernel, x, y, a) } \arguments{ \item{kernel}{the kernel function to be used to calculate the kernel matrix. This has to be a function of class \code{kernel}, i.e., one which can be generated by one of the built-in kernel generating functions (e.g., \code{rbfdot}, etc.)
or a user defined function of class \code{kernel} taking two vector arguments and returning a scalar.} \item{x}{a data matrix to be used to calculate the kernel matrix, or a list of vectors when a \code{stringkernel} is used} \item{y}{second data matrix to calculate the kernel matrix, or a list of vectors when a \code{stringkernel} is used} \item{z}{a suitable vector or matrix} \item{k}{a suitable vector or matrix} \item{a}{the squared norm of \code{x}, e.g., \code{rowSums(x^2)}} \item{blocksize}{the kernel expansion computations are done block wise to avoid storing the kernel matrix into memory. \code{blocksize} defines the size of the computational blocks.} } \details{ Common functions used during kernel based computations.\cr The \code{kernel} parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments. \pkg{kernlab} provides the most popular kernel functions which can be initialized by using the following functions: \itemize{ \item \code{rbfdot} Radial Basis kernel function \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} the Spline kernel } (see example.) \code{kernelFast} is mainly used in situations where columns of the kernel matrix are computed per invocation. In these cases, evaluating the norm of each row-entry over and over again would cause significant computational overhead. } \value{ \code{kernelMatrix} returns a symmetric positive semi-definite matrix.\cr \code{kernelPol} returns a matrix.\cr \code{kernelMult} usually returns a one-column matrix. } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{rbfdot}}, \code{\link{polydot}}, \code{\link{tanhdot}}, \code{\link{vanilladot}}} \examples{ ## use the spam data data(spam) dt <- as.matrix(spam[c(10:20,3000:3010),-58]) ## initialize kernel function rbf <- rbfdot(sigma = 0.05) rbf ## calculate kernel matrix kernelMatrix(rbf, dt) yt <- as.matrix(as.integer(spam[c(10:20,3000:3010),58])) yt[yt==2] <- -1 ## calculate the quadratic kernel expression kernelPol(rbf, dt, ,yt) ## calculate the kernel expansion kernelMult(rbf, dt, ,yt) } \keyword{algebra} \keyword{array} kernlab/man/gausspr-class.Rd0000644000176000001440000001041212055335061015547 0ustar ripleyusers\name{gausspr-class} \docType{class} \alias{gausspr-class} \alias{alpha,gausspr-method} \alias{cross,gausspr-method} \alias{error,gausspr-method} \alias{kcall,gausspr-method} \alias{kernelf,gausspr-method} \alias{kpar,gausspr-method} \alias{lev,gausspr-method} \alias{type,gausspr-method} \alias{alphaindex,gausspr-method} \alias{xmatrix,gausspr-method} \alias{ymatrix,gausspr-method} \alias{scaling,gausspr-method} \title{Class "gausspr"} \description{The Gaussian Processes object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("gausspr", ...)}
or by calling the \code{gausspr} function. } \section{Slots}{ \describe{ \item{\code{tol}:}{Object of class \code{"numeric"} contains tolerance of termination criteria} \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{kcall}:}{Object of class \code{"list"} contains the used function call } \item{\code{type}:}{Object of class \code{"character"} contains type of problem } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{lev}:}{Object of class \code{"vector"} containing the levels of the response (in case of classification) } \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in case of classification) } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computed alpha values } \item{\code{alphaindex}}{Object of class \code{"list"} containing the indexes for the alphas in various classes (in multi-class problems).} \item{\code{sol}}{Object of class \code{"matrix"} containing the solution to the Gaussian Process formulation, it is used to compute the variance in regression problems.} \item{\code{scaling}}{Object of class \code{"ANY"} containing the scaling coefficients of the data (when \code{scaled = TRUE} is used).} \item{\code{nvar}:}{Object of class \code{"numeric"} containing the computed variance} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "gausspr")}: returns the alpha vector} \item{cross}{\code{signature(object = "gausspr")}: returns the cross validation error } \item{error}{\code{signature(object = "gausspr")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "gausspr")}: returns the call performed} \item{kernelf}{\code{signature(object = "gausspr")}: returns the kernel function used} \item{kpar}{\code{signature(object = "gausspr")}: returns the kernel parameter used} \item{lev}{\code{signature(object = "gausspr")}: returns the response levels (in classification) } \item{type}{\code{signature(object = "gausspr")}: returns the type of problem} \item{xmatrix}{\code{signature(object = "gausspr")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "gausspr")}: returns the response matrix used} \item{scaling}{\code{signature(object = "gausspr")}: returns the scaling coefficients of the data (when \code{scaled = TRUE} is used)} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{gausspr}}, \code{\link{ksvm-class}}, \code{\link{vm-class}} } \examples{ # train model data(iris) test <- gausspr(Species~.,data=iris,var=2) test alpha(test) error(test) lev(test) } \keyword{classes} kernlab/man/onlearn-class.Rd0000644000176000001440000000672412117365114015535 0ustar
ripleyusers\name{onlearn-class} \docType{class} \alias{onlearn-class} \alias{alpha,onlearn-method} \alias{b,onlearn-method} \alias{buffer,onlearn-method} \alias{fit,onlearn-method} \alias{kernelf,onlearn-method} \alias{kpar,onlearn-method} \alias{predict,onlearn-method} \alias{rho,onlearn-method} \alias{rho} \alias{show,onlearn-method} \alias{type,onlearn-method} \alias{xmatrix,onlearn-method} \alias{buffer} \title{Class "onlearn"} \description{ The class of objects used by the Kernel-based Online learning algorithms} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("onlearn", ...)} or by calls to the function \code{inlearn}. } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"function"} containing the used kernel function} \item{\code{buffer}:}{Object of class \code{"numeric"} containing the size of the buffer} \item{\code{kpar}:}{Object of class \code{"list"} containing the hyperparameters of the kernel function.} \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data points (similar to support vectors) } \item{\code{fit}:}{Object of class \code{"numeric"} containing the decision function value of the last data point} \item{\code{onstart}:}{Object of class \code{"numeric"} used for indexing } \item{\code{onstop}:}{Object of class \code{"numeric"} used for indexing} \item{\code{alpha}:}{Object of class \code{"ANY"} containing the model parameters} \item{\code{rho}:}{Object of class \code{"numeric"} containing model parameter} \item{\code{b}:}{Object of class \code{"numeric"} containing the offset} \item{\code{pattern}:}{Object of class \code{"factor"} used for dealing with factors} \item{\code{type}:}{Object of class \code{"character"} containing the problem type (classification, regression, or novelty) } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "onlearn")}: returns the model parameters} \item{b}{\code{signature(object = "onlearn")}: returns the offset } \item{buffer}{\code{signature(object = "onlearn")}: returns the buffer size} \item{fit}{\code{signature(object = "onlearn")}: returns the last decision function value} \item{kernelf}{\code{signature(object = "onlearn")}: return the kernel function used} \item{kpar}{\code{signature(object = "onlearn")}: returns the hyper-parameters used} \item{onlearn}{\code{signature(obj = "onlearn")}: the learning function} \item{predict}{\code{signature(object = "onlearn")}: the predict function} \item{rho}{\code{signature(object = "onlearn")}: returns model parameter} \item{show}{\code{signature(object = "onlearn")}: show function} \item{type}{\code{signature(object = "onlearn")}: returns the type of problem} \item{xmatrix}{\code{signature(object = "onlearn")}: returns the stored data points} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{onlearn}}, \code{\link{inlearn}} } \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2), type="classification") ## learn one data point at a time for(i in sample(1:100,100)) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classes} kernlab/man/ranking-class.Rd0000644000176000001440000000261612117365252015527 0ustar ripleyusers\name{ranking-class} \docType{class} \alias{ranking-class} \alias{edgegraph} \alias{convergence} \alias{convergence,ranking-method}
\alias{edgegraph,ranking-method} \alias{show,ranking-method} \title{Class "ranking"} \description{Objects of class \code{"ranking"} are created from the \code{ranking} function and extend the class \code{matrix}} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("ranking", ...)}. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} containing the data ranking and scores} \item{\code{convergence}:}{Object of class \code{"matrix"} containing the convergence matrix} \item{\code{edgegraph}:}{Object of class \code{"matrix"} containing the edgegraph} } } \section{Extends}{ Class \code{"matrix"}, directly. } \section{Methods}{ \describe{ \item{show}{\code{signature(object = "ranking")}: displays the ranking score matrix} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{ranking}} } \examples{ data(spirals) ## create data set to be ranked ran <- spirals[rowSums(abs(spirals)<0.55)==2,] ## rank points according to "relevance" to point 54 (up left) ranked <- ranking(ran,54,kernel="rbfdot", kpar=list(sigma=100),edgegraph=TRUE) ranked edgegraph(ranked)[1:10,1:10] } \keyword{classes} kernlab/man/kernel-class.Rd0000644000176000001440000000422311304023134015337 0ustar ripleyusers\name{kernel-class} \docType{class} \alias{rbfkernel-class} \alias{polykernel-class} \alias{vanillakernel-class} \alias{tanhkernel-class} \alias{anovakernel-class} \alias{besselkernel-class} \alias{laplacekernel-class} \alias{splinekernel-class} \alias{stringkernel-class} \alias{fourierkernel-class} \alias{kfunction-class} \alias{kernel-class} \alias{kpar,kernel-method} \title{Class "kernel" "rbfkernel" "polykernel", "tanhkernel", "vanillakernel"} \description{ The built-in kernel classes in \pkg{kernlab}} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("rbfkernel")}, \code{new("polykernel")}, \code{new("tanhkernel")}, \code{new("vanillakernel")}, \code{new("anovakernel")}, \code{new("besselkernel")}, \code{new("laplacekernel")}, \code{new("splinekernel")}, \code{new("stringkernel")} or by calling the \code{rbfdot}, \code{polydot}, \code{tanhdot}, \code{vanilladot}, \code{anovadot}, \code{besseldot}, \code{laplacedot}, \code{splinedot}, \code{stringdot} functions, etc. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"function"} containing the kernel function } \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters } } } \section{Extends}{ Class \code{"kernel"}, directly. Class \code{"function"}, by class \code{"kernel"}.
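Since every kernel class extends \code{"function"}, a kernel object can be applied directly to a pair of vectors (a minimal sketch; the input values are illustrative):
\preformatted{
k <- polydot(degree = 2)
x <- rnorm(5)
y <- rnorm(5)
## evaluates (sum(x * y) + 1)^2, the polynomial kernel with
## default scale = 1 and offset = 1
k(x, y)
}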
} \section{Methods}{ \describe{ \item{kernelMatrix}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the kernel matrix} \item{kernelMult}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the kernel expansion} \item{kernelPol}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the quadratic kernel expression} \item{kernelFast}{\code{signature(kernel = "rbfkernel", x = "matrix"),,a}: computes parts or the full kernel matrix, mainly used in kernel algorithms where columns of the kernel matrix are computed per invocation } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{dots}} } \examples{ rbfkernel <- rbfdot(sigma = 0.1) rbfkernel is(rbfkernel) kpar(rbfkernel) } \keyword{classes} kernlab/man/lssvm-class.Rd0000644000176000001440000001040611304023134015223 0ustar ripleyusers\name{lssvm-class} \docType{class} \alias{lssvm-class} \alias{alpha,lssvm-method} \alias{b,lssvm-method} \alias{cross,lssvm-method} \alias{error,lssvm-method} \alias{kcall,lssvm-method} \alias{kernelf,lssvm-method} \alias{kpar,lssvm-method} \alias{param,lssvm-method} \alias{lev,lssvm-method} \alias{type,lssvm-method} \alias{alphaindex,lssvm-method} \alias{xmatrix,lssvm-method} \alias{ymatrix,lssvm-method} \alias{scaling,lssvm-method} \alias{nSV,lssvm-method} \title{Class "lssvm"} \description{The Least Squares SVM object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("lssvm", ...)} or by calling the \code{lssvm} function } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{param}:}{Object of class \code{"list"} contains the regularization parameter used.} \item{\code{kcall}:}{Object of class \code{"call"} contains the used function call } \item{\code{type}:}{Object of class \code{"character"} contains type of problem } \item{\code{coef}:}{Object of class \code{"ANY"} contains the model parameter } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{b}:}{Object of class \code{"numeric"} containing the offset } \item{\code{lev}:}{Object of class \code{"vector"} containing the levels of the response (in case of classification) } \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling information performed on the data} \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in case of classification) } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computed alpha values } \item{\code{alphaindex}}{Object of class \code{"list"} containing the indexes for the alphas in various classes (in multi-class problems).} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } \item{\code{nSV}:}{Object of class \code{"numeric"} containing the number of model parameters } } } \section{Methods}{
\describe{ \item{alpha}{\code{signature(object = "lssvm")}: returns the alpha vector} \item{cross}{\code{signature(object = "lssvm")}: returns the cross validation error } \item{error}{\code{signature(object = "lssvm")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "lssvm")}: returns the call performed} \item{kernelf}{\code{signature(object = "lssvm")}: returns the kernel function used} \item{kpar}{\code{signature(object = "lssvm")}: returns the kernel parameter used} \item{param}{\code{signature(object = "lssvm")}: returns the regularization parameter used} \item{lev}{\code{signature(object = "lssvm")}: returns the response levels (in classification) } \item{type}{\code{signature(object = "lssvm")}: returns the type of problem} \item{scaling}{\code{signature(object = "lssvm")}: returns the scaling values } \item{xmatrix}{\code{signature(object = "lssvm")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "lssvm")}: returns the response matrix used} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{lssvm}}, \code{\link{ksvm-class}} } \examples{ # train model data(iris) test <- lssvm(Species~.,data=iris) test alpha(test) error(test) lev(test) } \keyword{classes} kernlab/man/kfa.Rd0000644000176000001440000001115012117362655013531 0ustar ripleyusers\name{kfa} \alias{kfa} \alias{kfa,formula-method} \alias{kfa,matrix-method} \alias{show,kfa-method} \alias{coef,kfa-method} \title{Kernel Feature Analysis} \description{ The Kernel Feature Analysis algorithm is an algorithm for extracting structure from possibly high-dimensional data sets. Similar to \code{kpca}, a new basis for the data is found. The data can then be projected on the new basis. } \usage{ \S4method{kfa}{formula}(x, data = NULL, na.action = na.omit, ...) \S4method{kfa}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, subset = 59, normalize = TRUE, na.action = na.omit) } \arguments{ \item{x}{ The data matrix indexed by row or a formula describing the model. Note, that an intercept is always included, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments. \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
\item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. (default: 0, all)} \item{subset}{the number of features sampled (used) from the data set} \item{normalize}{normalize the features selected (default: TRUE)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{ Kernel Feature Analysis is similar to Kernel PCA, but instead of extracting eigenvectors of the training dataset in feature space, it approximates the eigenvectors by selecting training patterns which are good basis vectors for the training set. It works by choosing a fixed-size subset of the data set and scaling it to unit length (under the kernel). It then chooses the features that maximize the value of the inner product (kernel function) with the rest of the patterns. } \value{ \code{kfa} returns an object of class \code{kfa} containing the features selected by the algorithm. \item{xmatrix}{contains the features selected} \item{alpha}{contains the sparse alpha vector} The \code{predict} function can be used to embed new data points into the selected feature base. } \references{Alex J. Smola, Olvi L. Mangasarian and Bernhard Schoelkopf\cr \emph{Sparse Kernel Feature Analysis}\cr Data Mining Institute Technical Report 99-04, October 1999\cr \url{ftp://ftp.cs.wisc.edu/pub/dmi/tech-reports/99-04.ps} } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kpca}}, \code{\link{kfa-class}}} \examples{ data(promotergene) f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot", kpar=list(sigma=0.01)) plot(predict(f,promotergene),col=as.numeric(promotergene[,1])) } \keyword{cluster} kernlab/man/ksvm.Rd0000644000176000001440000003660712560414652013763 0ustar ripleyusers\name{ksvm} \alias{ksvm} \alias{ksvm,formula-method} \alias{ksvm,vector-method} \alias{ksvm,matrix-method} \alias{ksvm,kernelMatrix-method} \alias{ksvm,list-method} \alias{show,ksvm-method} \alias{coef,ksvm-method} \title{Support Vector Machines} \description{ Support Vector Machines are an excellent tool for classification, novelty detection, and regression. \code{ksvm} supports the well-known C-svc, nu-svc (classification), one-class-svc (novelty), eps-svr and nu-svr (regression) formulations along with native multi-class classification formulations and the bound-constraint SVM formulations.\cr \code{ksvm} also supports class-probabilities output and confidence intervals for regression. } \usage{ \S4method{ksvm}{formula}(x, data = NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{ksvm}{vector}(x, ...)
\S4method{ksvm}{matrix}(x, y = NULL, scaled = TRUE, type = NULL, kernel ="rbfdot", kpar = "automatic", C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ..., subset, na.action = na.omit) \S4method{ksvm}{kernelMatrix}(x, y = NULL, type = NULL, C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ...) \S4method{ksvm}{list}(x, y = NULL, type = NULL, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ..., na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). Note, that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the training data, when using a formula. By default the data is taken from the environment which `ksvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{type}{\code{ksvm} can be used for classification , for regression, or for novelty detection. Depending on whether \code{y} is a factor or not, the default setting for \code{type} is \code{C-svc} or \code{eps-svr}, respectively, but can be overwritten by setting an explicit value.\cr Valid options are: \itemize{ \item \code{C-svc} C classification \item \code{nu-svc} nu classification \item \code{C-bsvc} bound-constraint svm classification \item \code{spoc-svc} Crammer, Singer native multi-class \item \code{kbb-svc} Weston, Watkins native multi-class \item \code{one-svc} novelty detection \item \code{eps-svr} epsilon regression \item \code{nu-svr} nu regression \item \code{eps-bsvr} bound-constraint svm regression } } \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments (see \code{\link{kernels}}). \cr kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. 
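A minimal sketch of such a user-defined kernel (the particular function shown here is illustrative only; compare the custom-kernel example in the examples section below):
\preformatted{
## an inhomogeneous polynomial kernel of degree 2, written by hand
k <- function(x, y) (sum(x * y) + 1)^2
class(k) <- "kernel"
## k can then be supplied directly, e.g. ksvm(x, y, kernel = k)
}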
} \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").} \item{C}{cost of constraints violation (default: 1); this is the `C'-constant of the regularization term in the Lagrange formulation.} \item{nu}{parameter needed for \code{nu-svc}, \code{one-svc}, and \code{nu-svr}. The \code{nu} parameter sets the upper bound on the training error and the lower bound on the fraction of data points to become Support Vectors (default: 0.2).} \item{epsilon}{epsilon in the insensitive-loss function used for \code{eps-svr}, \code{nu-svr} and \code{eps-bsvr} (default: 0.1)} \item{prob.model}{if set to \code{TRUE} builds a model for calculating class probabilities or, in case of regression, calculates the scaling parameter of the Laplacian distribution fitted on the residuals. Fitting is done on output data created by performing a 3-fold cross-validation on the training data. For details see references. (default: \code{FALSE})} \item{class.weights}{a named vector of weights for the different classes, used for asymmetric class sizes. Not all factor levels have to be supplied (default weight: 1). All components have to be named.} \item{cache}{cache memory in MB (default: 40)} \item{tol}{tolerance of termination criterion (default: 0.001)} \item{shrinking}{option whether to use the shrinking-heuristics (default: \code{TRUE})} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the accuracy rate for classification and the Mean Squared Error for regression} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: \code{TRUE})} \item{\dots}{additional parameters for the low level fitting function} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found.
(NOTE: If given, this argument must be named.)} } \value{ An S4 object of class \code{"ksvm"} containing the fitted model. Accessor functions can be used to access the slots of the object (see examples) which include: \item{alpha}{The resulting support vectors (alpha vector), possibly scaled.} \item{alphaindex}{The index of the resulting support vectors in the data matrix. Note that this index refers to the pre-processed data (after the possible effect of \code{na.omit} and \code{subset})} \item{coef}{The corresponding coefficients times the training labels.} \item{b}{The negative intercept.} \item{nSV}{The number of Support Vectors} \item{obj}{The value of the objective function. In case of one-against-one classification this is a vector of values} \item{error}{Training error} \item{cross}{Cross validation error (when cross > 0)} \item{prob.model}{Contains the width of the Laplacian fitted on the residuals in case of regression, or the parameters of the sigmoid fitted on the decision values in case of classification.} } \details{ \code{ksvm} uses John Platt's SMO algorithm for solving the SVM QP problem in most SVM formulations. For the \code{spoc-svc}, \code{kbb-svc}, \code{C-bsvc} and \code{eps-bsvr} formulations a chunking algorithm based on the TRON QP solver is used. \cr For multiclass-classification with \eqn{k} classes, \eqn{k > 2}, \code{ksvm} uses the `one-against-one'-approach, in which \eqn{k(k-1)/2} binary classifiers are trained; the appropriate class is found by a voting scheme. The \code{spoc-svc} and the \code{kbb-svc} formulations deal with the multiclass-classification problems by solving a single quadratic problem involving all the classes.\cr If the predictor variables include factors, the formula interface must be used to get a correct model matrix. \cr In classification, when \code{prob.model} is \code{TRUE}, a 3-fold cross validation is performed on the data and a sigmoid function is fitted on the resulting decision values \eqn{f}. The data can be passed to the \code{ksvm} function in a \code{matrix} or a \code{data.frame}; in addition \code{ksvm} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used.\cr The \code{plot} function for binary classification \code{ksvm} objects displays a contour plot of the decision values with the corresponding support vectors highlighted.\cr The predict function can return class probabilities for classification problems by setting the \code{type} parameter to "probabilities". \cr The problem of model selection is partially addressed by an empirical observation for the RBF kernels (Gaussian, Laplace) where the optimal values of the \eqn{sigma} width parameter are shown to lie between the 0.1 and 0.9 quantile of the \eqn{\|x - x'\|} statistics. When using an RBF kernel and setting \code{kpar} to "automatic", \code{ksvm} uses the \code{sigest} function to estimate the quantiles and uses the median of the values. } \note{Data is scaled internally by default, usually yielding better results.} \references{ \itemize{ \item Chang Chih-Chung, Lin Chih-Jen\cr \emph{LIBSVM: a library for Support Vector Machines}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm} \item Chih-Wei Hsu, Chih-Jen Lin\cr \emph{BSVM} \url{http://www.csie.ntu.edu.tw/~cjlin/bsvm/} \item J. Platt\cr \emph{Probabilistic outputs for support vector machines and comparison to regularized likelihood methods} \cr Advances in Large Margin Classifiers, A. Smola, P. Bartlett, B.
Schoelkopf and D. Schuurmans, Eds. Cambridge, MA: MIT Press, 2000.\cr \url{http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.1639} \item H.-T. Lin, C.-J. Lin and R. C. Weng\cr \emph{A note on Platt's probabilistic outputs for support vector machines}\cr \url{http://www.csie.ntu.edu.tw/~htlin/paper/doc/plattprob.pdf} \item C.-W. Hsu and C.-J. Lin \cr \emph{A comparison of methods for multi-class support vector machines}\cr IEEE Transactions on Neural Networks, 13(2002) 415-425.\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz} \item K. Crammer, Y. Singer\cr \emph{On the learnability and design of output codes for multiclass problems}\cr Computational Learning Theory, 35-46, 2000.\cr \url{http://webee.technion.ac.il/people/koby/publications/ecoc-mlj02.pdf} \item J. Weston, C. Watkins\cr \emph{Multi-class support vector machines} \cr In M. Verleysen, Proceedings of ESANN99 Brussels, 1999\cr \url{http://citeseer.ist.psu.edu/8884.html} } } \author{ Alexandros Karatzoglou (SMO optimizers in C++ by Chih-Chung Chang & Chih-Jen Lin)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{predict.ksvm}}, \code{\link{ksvm-class}}, \code{\link{couple}} } \keyword{methods} \keyword{regression} \keyword{nonlinear} \keyword{classif} \keyword{neural} \examples{ ## simple example using the spam data set data(spam) ## create test and training set index <- sample(1:dim(spam)[1]) spamtrain <- spam[index[1:floor(dim(spam)[1]/2)], ] spamtest <- spam[index[((ceiling(dim(spam)[1]/2)) + 1):dim(spam)[1]], ] ## train a support vector machine filter <- ksvm(type~.,data=spamtrain,kernel="rbfdot", kpar=list(sigma=0.05),C=5,cross=3) filter ## predict mail type on the test set mailtype <- predict(filter,spamtest[,-58]) ## Check results table(mailtype,spamtest[,58]) ## Another example with the famous iris data data(iris) ## Create a kernel function using the built-in rbfdot function rbf <- rbfdot(sigma=0.1) rbf ## train a bound constraint support vector machine irismodel <- ksvm(Species~.,data=iris,type="C-bsvc", kernel=rbf,C=10,prob.model=TRUE) irismodel ## get fitted values fitted(irismodel) ## Test on the training set with probabilities as output predict(irismodel, iris[,-5], type="probabilities") ## Demo of the plot function x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) ### Use kernelMatrix K <- as.kernelMatrix(crossprod(t(x))) svp2 <- ksvm(K, y, type="C-svc") svp2 # test data xtest <- rbind(matrix(rnorm(20),,2),matrix(rnorm(20,mean=3),,2)) # test kernel matrix i.e.
inner/kernel product of test data with # Support Vectors Ktest <- as.kernelMatrix(crossprod(t(xtest),t(x[SVindex(svp2), ]))) predict(svp2, Ktest) #### Use custom kernel k <- function(x,y) {(sum(x*y) +1)*exp(-0.001*sum((x-y)^2))} class(k) <- "kernel" data(promotergene) ## train svm using custom kernel gene <- ksvm(Class~.,data=promotergene[c(1:20, 80:100),],kernel=k, C=5,cross=5) gene #### Use text with string kernels data(reuters) is(reuters) tsv <- ksvm(reuters,rlabels,kernel="stringdot", kpar=list(length=5),cross=3,C=10) tsv ## regression # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.03) # train support vector machine regm <- ksvm(x,y,epsilon=0.01,kpar=list(sigma=16),cross=3) plot(x,y,type="l") lines(x,predict(regm,x),col="red") } kernlab/man/inchol.Rd0000644000176000001440000001025011304023134014233 0ustar ripleyusers\name{inchol} \alias{inchol} \alias{inchol,matrix-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{Incomplete Cholesky decomposition} \description{ \code{inchol} computes the incomplete Cholesky decomposition of the kernel matrix from a data matrix. } \usage{ inchol(x, kernel="rbfdot", kpar=list(sigma=0.1), tol = 0.001, maxiter = dim(x)[1], blocksize = 50, verbose = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{The data matrix indexed by row} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class \code{kernel}, which computes the inner product in feature space between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. } \item{tol}{algorithm stops when the remaining pivots bring less accuracy than \code{tol} (default: 0.001)} \item{maxiter}{maximum number of iterations and columns in \eqn{Z}} \item{blocksize}{add this many columns to matrix per iteration} \item{verbose}{print info on algorithm convergence} } \details{An incomplete Cholesky decomposition calculates \eqn{Z} where \eqn{K = ZZ'}, \eqn{K} being the kernel matrix. Since the rank of a kernel matrix is usually low, \eqn{Z} tends to be smaller than the complete kernel matrix.
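As a minimal sketch of this point (the data matrix and the \code{sigma} value are purely illustrative), a kernel matrix--vector product \eqn{Kv} can be formed from \eqn{Z} alone, since \eqn{Kv = Z(Z'v)}:
\preformatted{
data(iris)
x <- as.matrix(iris[,-5])
Z <- inchol(x, kernel = rbfdot(sigma = 0.1))
v <- rnorm(dim(x)[1])
## approximates kernelMatrix(rbfdot(sigma = 0.1), x) \%*\% v
## without ever forming the full kernel matrix
Kv <- Z \%*\% crossprod(Z, v)
}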
The decomposed matrix can be used to create memory-efficient kernel-based algorithms without the need to compute and store a complete kernel matrix in memory.} \value{ An S4 object of class "inchol" which is an extension of the class "matrix". The object is the decomposed kernel matrix along with the slots: \item{pivots}{Indices on which pivots were done} \item{diagresidues}{Residuals left on the diagonal} \item{maxresiduals}{Residuals picked for pivoting} slots can be accessed either by \code{object@slot} or by accessor functions with the same name (e.g., \code{pivots(object)})} \references{ Francis R. Bach, Michael I. Jordan\cr \emph{Kernel Independent Component Analysis}\cr Journal of Machine Learning Research 3, 1-48\cr \url{http://www.jmlr.org/papers/volume3/bach02a/bach02a.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by S.V.N. (Vishy) Vishwanathan and Alex Smola)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{csi}}, \code{\link{inchol-class}}, \code{\link{chol}}} \examples{ data(iris) datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- inchol(datamatrix,kernel=rbf) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{methods} \keyword{algebra} \keyword{array} kernlab/man/kha.Rd0000644000176000001440000001160412117362753013536 0ustar ripleyusers\name{kha} \alias{kha} \alias{kha,formula-method} \alias{kha,matrix-method} \alias{predict,kha-method} \encoding{latin1} \title{Kernel Principal Components Analysis} \description{ Kernel Hebbian Algorithm is a nonlinear iterative algorithm for principal component analysis.} \usage{ \S4method{kha}{formula}(x, data = NULL, na.action, ...) \S4method{kha}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 5, eta = 0.005, th = 1e-4, maxiter = 10000, verbose = FALSE, na.action = na.omit, ...) } \arguments{ \item{x}{ The data matrix indexed by row or a formula describing the model. Note that an intercept is always included, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments (see \code{\link{kernels}}). \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot".
\item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. (default: 5)} \item{eta}{The Hebbian learning rate (default: 0.005)} \item{th}{the smallest value of the convergence step (default: 0.0001) } \item{maxiter}{the maximum number of iterations.} \item{verbose}{print convergence every 100 iterations. (default: FALSE)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{The original form of KPCA can only be used on small data sets since it requires the estimation of the eigenvectors of a full kernel matrix. The Kernel Hebbian Algorithm iteratively estimates the Kernel Principal Components with only linear order memory complexity (see the reference for more details). } \value{ An S4 object containing the principal component vectors along with the corresponding normalization values. \item{pcv}{a matrix containing the principal component vectors (column wise)} \item{eig}{The normalization values} \item{xmatrix}{The original data matrix} all the slots of the object can be accessed by accessor functions. } \note{The predict function can be used to embed new data in the new space} \references{Kwang In Kim, M.O. Franz and B. Schölkopf\cr \emph{Kernel Hebbian Algorithm for Iterative Kernel Principal Component Analysis}\cr Max-Planck-Institut für biologische Kybernetik, Tübingen (109)\cr \url{http://www.kyb.tuebingen.mpg.de/publications/pdfs/pdf2302.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kpca}}, \code{\link{kfa}}, \code{\link{kcca}}, \code{pca}} \examples{ # another example using the iris data data(iris) test <- sample(1:150,70) kpc <- kha(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2, eta=0.001, maxiter=65) #print the principal component vectors pcv(kpc) #plot the data projection on the components plot(predict(kpc,iris[,-5]),col=as.integer(iris[,5]), xlab="1st Principal Component",ylab="2nd Principal Component") } \keyword{cluster} kernlab/man/kkmeans.Rd0000644000176000001440000001346212560414652014426 0ustar ripleyusers\name{kkmeans} \alias{kkmeans} \alias{kkmeans,matrix-method} \alias{kkmeans,formula-method} \alias{kkmeans,list-method} \alias{kkmeans,kernelMatrix-method} \title{Kernel k-means} \description{ A weighted kernel version of the famous k-means algorithm. } \usage{ \S4method{kkmeans}{formula}(x, data = NULL, na.action = na.omit, ...) \S4method{kkmeans}{matrix}(x, centers, kernel = "rbfdot", kpar = "automatic", alg="kkmeans", p=1, na.action = na.omit, ...) \S4method{kkmeans}{kernelMatrix}(x, centers, ...) \S4method{kkmeans}{list}(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), alg ="kkmeans", p = 1, na.action = na.omit, ...)
} \arguments{ \item{x}{the matrix of data to be clustered, or a symbolic description of the model to be fit, or a kernel Matrix of class \code{kernelMatrix}, or a list of character vectors.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `kkmeans' is called from.} \item{centers}{Either the number of clusters or a matrix of initial cluster centers. If the former, a random initial partitioning is used.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments (see \code{\link{kernels}}). \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{a character string or the list of hyper-parameters (kernel parameters). The default character string \code{"automatic"} uses a heuristic to determine a suitable value for the width parameter of the RBF kernel.\cr A list can also be used containing the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{alg}{the algorithm to use. Options currently include \code{kkmeans} and \code{kerninghan}. } \item{p}{a parameter used to keep the affinity matrix positive semidefinite} \item{na.action}{The action to perform on NA} \item{\dots}{additional parameters} } \details{ \code{kernel k-means} uses the 'kernel trick' (i.e. implicitly projecting all data into a non-linear feature space with the use of a kernel) in order to deal with one of the major drawbacks of \code{k-means}, namely that it cannot capture clusters that are not linearly separable in input space. \cr The algorithm is implemented using the triangle inequality to avoid unnecessary and computationally expensive distance calculations. This leads to a significant speedup, particularly on large data sets with a high number of clusters. \cr With a particular choice of weights this algorithm becomes equivalent to the Kernighan-Lin and norm-cut graph partitioning algorithms.
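As a short sketch of this behaviour (the \code{sigma} value is purely illustrative), the non-linearly separable \code{spirals} data can be clustered with an RBF kernel:
\preformatted{
data(spirals)
sk <- kkmeans(spirals, centers = 2, kernel = "rbfdot",
              kpar = list(sigma = 100))
size(sk)
}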
\cr The data can be passed to the \code{kkmeans} function in a \code{matrix} or a \code{data.frame}; in addition \code{kkmeans} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors (e.g. for text clustering), where a string kernel has to be used. } \value{ An S4 object of class \code{specc} which extends the class \code{vector} containing integers indicating the cluster to which each point is allocated. The following slots contain useful information: \item{centers}{A matrix of cluster centers.} \item{size}{The number of points in each cluster} \item{withinss}{The within-cluster sum of squares for each cluster} \item{kernelf}{The kernel function used} } \references{ Inderjit Dhillon, Yuqiang Guan, Brian Kulis\cr A Unified View of Kernel k-means, Spectral Clustering and Graph Partitioning\cr UTCS Technical Report\cr \url{http://web.cse.ohio-state.edu/~kulis/pubs/spectral_techreport.pdf} } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{specc}}, \code{\link{kpca}}, \code{\link{kcca}} } \examples{ ## Cluster the iris data set. data(iris) sc <- kkmeans(as.matrix(iris[,-5]), centers=3) sc centers(sc) size(sc) withinss(sc) } \keyword{cluster} kernlab/man/kpca-class.Rd0000644000176000001440000000455712117363140015014 0ustar ripleyusers\name{kpca-class} \docType{class} \alias{kpca-class} \alias{rotated} \alias{eig,kpca-method} \alias{kcall,kpca-method} \alias{kernelf,kpca-method} \alias{pcv,kpca-method} \alias{rotated,kpca-method} \alias{xmatrix,kpca-method} \title{Class "kpca"} \description{ The Kernel Principal Components Analysis class} \section{Objects of class "kpca"}{ Objects can be created by calls of the form \code{new("kpca", ...)} or by calling the \code{kpca} function.
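For instance (mirroring the example below):
\preformatted{
data(iris)
kpc <- kpca(~., data = iris[,-5], kernel = "rbfdot",
            kpar = list(sigma = 0.2), features = 2)
}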
} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding eigenvalues} \item{\code{rotated}:}{Object of class \code{"matrix"} containing the projection of the data on the principal components} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "kpca")}: returns the eigenvalues } \item{kcall}{\code{signature(object = "kpca")}: returns the performed call} \item{kernelf}{\code{signature(object = "kpca")}: returns the used kernel function} \item{pcv}{\code{signature(object = "kpca")}: returns the principal component vectors } \item{predict}{\code{signature(object = "kpca")}: embeds new data } \item{rotated}{\code{signature(object = "kpca")}: returns the projected data} \item{xmatrix}{\code{signature(object = "kpca")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm-class}}, \code{\link{kcca-class}} } \examples{ # another example using the iris data(iris) test <- sample(1:50,20) kpc <- kpca(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2) #print the principal component vectors pcv(kpc) rotated(kpc) kernelf(kpc) eig(kpc) } \keyword{classes} kernlab/man/ipop-class.Rd0000644000176000001440000000313311304023134015023 0ustar ripleyusers\name{ipop-class} \docType{class} \alias{ipop-class} \alias{primal,ipop-method} \alias{dual,ipop-method} \alias{how,ipop-method} \alias{primal} \alias{dual} \alias{how} \title{Class "ipop"} \description{The quadratic problem solver class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("ipop", ...)}. or by calling the \code{ipop} function. } \section{Slots}{ \describe{ \item{\code{primal}:}{Object of class \code{"vector"} the primal solution of the problem} \item{\code{dual}:}{Object of class \code{"numeric"} the dual of the problem} \item{\code{how}:}{Object of class \code{"character"} convergence information} } } \section{Methods}{ \describe{ \item{primal}{Object of class \code{ipop}}{Return the primal of the problem} \item{dual}{Object of class \code{ipop}}{Return the dual of the problem} \item{how}{Object of class \code{ipop}}{Return information on convergence} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ipop}} } \examples{ ## solve the Support Vector Machine optimization problem data(spam) ## sample a scaled part (300 points) of the spam data set m <- 300 set <- sample(1:dim(spam)[1],m) x <- scale(as.matrix(spam[,-58]))[set,] y <- as.integer(spam[set,58]) y[y==2] <- -1 ##set C parameter and kernel C <- 5 rbf <- rbfdot(sigma = 0.1) ## create H matrix etc. 
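## kernelPol(rbf, x, , y) computes the matrix with entries
## y_i * y_j * k(x_i, x_j), i.e. the Hessian of the SVM dual problem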
H <- kernelPol(rbf,x,,y) c <- matrix(rep(-1,m)) A <- t(y) b <- 0 l <- matrix(rep(0,m)) u <- matrix(rep(C,m)) r <- 0 sv <- ipop(c,H,A,b,l,u,r) primal(sv) dual(sv) how(sv) } \keyword{classes} kernlab/man/reuters.Rd0000644000176000001440000000111711304023134014452 0ustar ripleyusers\name{reuters} \alias{reuters} \alias{rlabels} \title{Reuters Text Data} \description{A small sample from the Reuters news data set.} \usage{data(reuters)} \format{ A list of 40 text documents along with the labels. \code{reuters} contains the text documents and \code{rlabels} the labels in a vector. } \details{ This dataset contains a list of 40 text documents along with the labels. The data consist of 20 documents from the \code{acq} category and 20 documents from the \code{crude} category. The labels are stored in \code{rlabels}. } \source{Reuters} \keyword{datasets} kernlab/man/inchol-class.Rd0000644000176000001440000000315211304023134015341 0ustar ripleyusers\name{inchol-class} \docType{class} \alias{inchol-class} \alias{diagresidues} \alias{maxresiduals} \alias{pivots} \alias{diagresidues,inchol-method} \alias{maxresiduals,inchol-method} \alias{pivots,inchol-method} \title{Class "inchol" } \description{The reduced Cholesky decomposition object} \section{Objects from the Class}{Objects can be created by calls of the form \code{new("inchol", ...)} or by calling the \code{inchol} function.} \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} contains the decomposed matrix} \item{\code{pivots}:}{Object of class \code{"vector"} contains the pivots performed} \item{\code{diagresidues}:}{Object of class \code{"vector"} contains the diagonal residuals} \item{\code{maxresiduals}:}{Object of class \code{"vector"} contains the maximum residuals} } } \section{Extends}{ Class \code{"matrix"}, directly. } \section{Methods}{ \describe{ \item{diagresidues}{\code{signature(object = "inchol")}: returns the diagonal residuals} \item{maxresiduals}{\code{signature(object = "inchol")}: returns the maximum residuals} \item{pivots}{\code{signature(object = "inchol")}: returns the pivots performed} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{inchol}}, \code{\link{csi-class}}, \code{\link{csi}}} \examples{ data(iris) datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- inchol(datamatrix,kernel=rbf) dim(Z) pivots(Z) diagresidues(Z) maxresiduals(Z) } \keyword{classes} kernlab/man/kcca.Rd0000644000176000001440000000715511304023134013662 0ustar ripleyusers\name{kcca} \alias{kcca} \alias{kcca,matrix-method} \title{Kernel Canonical Correlation Analysis} \description{ Computes the canonical correlation analysis in feature space. } \usage{ \S4method{kcca}{matrix}(x, y, kernel="rbfdot", kpar=list(sigma=0.1), gamma = 0.1, ncomps = 10, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{a matrix containing data indexed by row} \item{y}{a matrix containing data indexed by row} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments.
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{gamma}{regularization parameter (default: 0.1)} \item{ncomps}{number of canonical components (default: 10)} \item{\dots}{additional parameters for the \code{kpca} function} } \details{ Kernel Canonical Correlation Analysis (KCCA) is the kernel version of canonical correlation analysis, a non-linear extension of CCA. Given two random variables, KCCA aims at extracting the information which is shared by the two random variables. More precisely, given \eqn{x} and \eqn{y}, the purpose of KCCA is to provide nonlinear mappings \eqn{f(x)} and \eqn{g(y)} such that their correlation is maximized. } \value{ An S4 object containing the following slots: \item{kcor}{Correlation coefficients in feature space} \item{xcoef}{estimated coefficients for the \code{x} variables in the feature space} \item{ycoef}{estimated coefficients for the \code{y} variables in the feature space} %% \item{xvar}{The canonical variates for \code{x}} %% \item{yvar}{The canonical variates for \code{y}} } \references{ Malte Kuss, Thore Graepel \cr \emph{The Geometry Of Kernel Canonical Correlation Analysis}\cr \url{http://www.kyb.tuebingen.mpg.de/publications/pdfs/pdf2233.pdf}} \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{cancor}}, \code{\link{kpca}}, \code{\link{kfa}}, \code{\link{kha}}} \examples{ ## dummy data x <- matrix(rnorm(30),15) y <- matrix(rnorm(30),15) kcca(x,y,ncomps=2) } \keyword{multivariate} kernlab/man/kcca-class.Rd0000644000176000001440000000345511304023134014766 0ustar ripleyusers\name{kcca-class} \docType{class} \alias{kcca-class} \alias{kcor} \alias{xcoef} \alias{ycoef} %%\alias{yvar} %%\alias{xvar} \alias{kcor,kcca-method} \alias{xcoef,kcca-method} \alias{xvar,kcca-method} \alias{ycoef,kcca-method} \alias{yvar,kcca-method} \title{Class "kcca"} \description{The "kcca" class } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kcca", ...)} or by calling the \code{kcca} function.
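For instance (dummy data, mirroring the example below):
\preformatted{
kcc <- kcca(matrix(rnorm(30), 15), matrix(rnorm(30), 15), ncomps = 2)
}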
} \section{Slots}{ \describe{ \item{\code{kcor}:}{Object of class \code{"vector"} describing the correlations} \item{\code{xcoef}:}{Object of class \code{"matrix"} estimated coefficients for the \code{x} variables} \item{\code{ycoef}:}{Object of class \code{"matrix"} estimated coefficients for the \code{y} variables } %% \item{\code{xvar}:}{Object of class \code{"matrix"} holds the %% canonical variates for \code{x}} %% \item{\code{yvar}:}{Object of class \code{"matrix"} holds the %% canonical variates for \code{y}} } } \section{Methods}{ \describe{ \item{kcor}{\code{signature(object = "kcca")}: returns the correlations} \item{xcoef}{\code{signature(object = "kcca")}: returns the estimated coefficients for the \code{x} variables} \item{ycoef}{\code{signature(object = "kcca")}: returns the estimated coefficients for the \code{y} variables } %% \item{xvar}{\code{signature(object = "kcca")}: returns the canonical %% variates for \code{x}} %% \item{yvar}{\code{signature(object = "kcca")}: returns the canonical %% variates for \code{y}} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kcca}}, \code{\link{kpca-class}} } \examples{ ## dummy data x <- matrix(rnorm(30),15) y <- matrix(rnorm(30),15) kcca(x,y,ncomps=2) } \keyword{classes} kernlab/man/ipop.Rd0000644000176000001440000000531712560414652013744 0ustar ripleyusers\name{ipop} \alias{ipop} \alias{ipop,ANY,matrix-method} \title{Quadratic Programming Solver} \description{ ipop solves the quadratic programming problem:\cr \eqn{\min(c'*x + 1/2 * x' * H * x)}\cr subject to: \cr \eqn{b <= A * x <= b + r}\cr \eqn{l <= x <= u} } \usage{ ipop(c, H, A, b, l, u, r, sigf = 7, maxiter = 40, margin = 0.05, bound = 10, verb = 0) } \arguments{ \item{c}{Vector or one column matrix appearing in the quadratic function} \item{H}{square matrix appearing in the quadratic function, or the decomposed form \eqn{Z} of the \eqn{H} matrix where \eqn{Z} is an \eqn{n x m} matrix with \eqn{n > m} and \eqn{ZZ' = H}.} \item{A}{Matrix defining the constraints under which we minimize the quadratic function} \item{b}{Vector or one column matrix defining the constraints} \item{l}{Lower bound vector or one column matrix} \item{u}{Upper bound vector or one column matrix} \item{r}{Vector or one column matrix defining constraints} \item{sigf}{Precision (default: 7 significant figures)} \item{maxiter}{Maximum number of iterations} \item{margin}{how close we get to the constraints} \item{bound}{Clipping bound for the variables} \item{verb}{Display convergence information during runtime} } \details{ ipop uses an interior point method to solve the quadratic programming problem. \cr The \eqn{H} matrix can also be provided in the decomposed form \eqn{Z} where \eqn{ZZ' = H}; in that case the Sherman-Morrison-Woodbury formula is used internally. } \value{ An S4 object with the following slots: \item{primal}{Vector containing the primal solution of the quadratic problem} \item{dual}{The dual solution of the problem} \item{how}{Character string describing the type of convergence} all slots can be accessed through accessor functions (see example) } \references{ R. J.
Vanderbei\cr \emph{LOQO: An interior point code for quadratic programming}\cr Optimization Methods and Software 11, 451-484, 1999 \cr \url{http://www.princeton.edu/~rvdb/ps/loqo5.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by Alex Smola) \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{solve.QP}, \code{\link{inchol}}, \code{\link{csi}}} \examples{ ## solve the Support Vector Machine optimization problem data(spam) ## sample a scaled part (500 points) of the spam data set m <- 500 set <- sample(1:dim(spam)[1],m) x <- scale(as.matrix(spam[,-58]))[set,] y <- as.integer(spam[set,58]) y[y==2] <- -1 ##set C parameter and kernel C <- 5 rbf <- rbfdot(sigma = 0.1) ## create H matrix etc. H <- kernelPol(rbf,x,,y) c <- matrix(rep(-1,m)) A <- t(y) b <- 0 l <- matrix(rep(0,m)) u <- matrix(rep(C,m)) r <- 0 sv <- ipop(c,H,A,b,l,u,r) sv dual(sv) } \keyword{optimize} kernlab/man/lssvm.Rd0000644000176000001440000002010212117365064014130 0ustar ripleyusers\name{lssvm} \docType{methods} \alias{lssvm} \alias{lssvm-methods} \alias{lssvm,formula-method} \alias{lssvm,vector-method} \alias{lssvm,matrix-method} \alias{lssvm,list-method} \alias{lssvm,kernelMatrix-method} \alias{show,lssvm-method} \alias{coef,lssvm-method} \alias{predict,lssvm-method} \title{Least Squares Support Vector Machine} \description{ The \code{lssvm} function is an implementation of the Least Squares SVM. \code{lssvm} includes a reduced version of Least Squares SVM using a decomposition of the kernel matrix which is calculated by the \code{csi} function. } \usage{ \S4method{lssvm}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{lssvm}{vector}(x, ...) \S4method{lssvm}{matrix}(x, y, scaled = TRUE, kernel = "rbfdot", kpar = "automatic", type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) \S4method{lssvm}{kernelMatrix}(x, y, type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...) \S4method{lssvm}{list}(x, y, scaled = TRUE, kernel = "stringdot", kpar = list(length=4, lambda = 0.5), type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ..., subset) } \arguments{ \item{x}{a symbolic description of the model to be fit, a matrix or vector containing the training data when a formula interface is not used or a \code{kernelMatrix} or a list of character vectors.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `lssvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression - currently not supported -).} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{type}{Type of problem. Either "classification" or "regression". Depending on whether \code{y} is a factor or not, the default setting for \code{type} is "classification" or "regression" respectively, but can be overwritten by setting an explicit value.
(regression is currently not supported)\cr} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{ the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. For valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.\cr \code{kpar} can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic"). } \item{tau}{the regularization parameter (default 0.01) } \item{reduced}{if set to \code{FALSE} the full linear problem of the lssvm is solved, when \code{TRUE} a reduced method using \code{csi} is used.} \item{rank}{the maximal rank of the decomposed kernel matrix, see \code{csi}} \item{delta}{number of columns of cholesky performed in advance, see \code{csi} (default 40)} \item{tol}{tolerance of termination criterion for the \code{csi} function, lower tolerance leads to more precise approximation but may increase the training time and the decomposed matrix size (default: 0.0001)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')} \item{cross}{if a integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. 
(NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{Least Squares Support Vector Machines are a reformulation of the standard SVM that leads to solving linear KKT systems. The algorithm is based on the minimization of a classical penalized least-squares cost function. The current implementation approximates the kernel matrix by an incomplete Cholesky factorization obtained by the \code{\link{csi}} function; thus the solution is an approximation to the exact solution of the lssvm optimization problem. The quality of the solution depends on the approximation and can be influenced by the "rank", "delta", and "tol" parameters. } \value{ An S4 object of class \code{"lssvm"} containing the fitted model. Accessor functions can be used to access the slots of the object (see examples) which include: \item{alpha}{the parameters of the \code{"lssvm"}} \item{coef}{the model coefficients (identical to alpha)} \item{b}{the model offset.} \item{xmatrix}{the training data used by the model} } \references{ J. A. K. Suykens and J. Vandewalle\cr \emph{Least Squares Support Vector Machine Classifiers}\cr Neural Processing Letters vol. 9, issue 3, June 1999\cr } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{ksvm}}, \code{\link{gausspr}}, \code{\link{csi}} } \examples{ ## simple example data(iris) lir <- lssvm(Species~.,data=iris) lir lirr <- lssvm(Species~.,data= iris, reduced = FALSE) lirr ## Using the kernelMatrix interface iris <- unique(iris) rbf <- rbfdot(0.5) k <- kernelMatrix(rbf, as.matrix(iris[,-5])) klir <- lssvm(k, iris[, 5]) klir pre <- predict(klir, k) } \keyword{classif} \keyword{nonlinear} \keyword{methods} kernlab/man/sigest.Rd0000644000176000001440000000631712117366220014267 0ustar ripleyusers\name{sigest} \alias{sigest} \alias{sigest,formula-method} \alias{sigest,matrix-method} \title{Hyperparameter estimation for the Gaussian Radial Basis kernel} \description{ Estimates a range of values for the "sigma" inverse width parameter of the Gaussian Radial Basis kernel, for use with Support Vector Machines. The estimation is based on the data to be used. } \usage{ \S4method{sigest}{formula}(x, data=NULL, frac = 0.5, na.action = na.omit, scaled = TRUE) \S4method{sigest}{matrix}(x, frac = 0.5, scaled = TRUE, na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model upon which the estimation is based. When not using a formula x is a matrix or vector containing the data} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `ksvm' is called from.} \item{frac}{Fraction of data to use for estimation. By default half of the data is used to estimate the range of the sigma hyperparameter.} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally to zero mean and unit variance (since this is the default action in \code{ksvm} as well). The center and scale values are returned and used for later predictions. } \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found.
(NOTE: If given, this argument must be named.)} } \details{ \code{sigest} estimates the range of values for the sigma parameter which would return good results when used with a Support Vector Machine (\code{ksvm}). The estimation is based upon the 0.1 and 0.9 quantile of \eqn{\|x - x'\|^2}. Any value between these two bounds will produce good results. } \value{ Returns a vector of length 3 defining the range (0.1 quantile, median and 0.9 quantile) of the sigma hyperparameter. } \references{ B. Caputo, K. Sim, F. Furesjo, A. Smola, \cr \emph{Appearance-based object recognition using SVMs: which kernel should I use?}\cr Proc. of the NIPS workshop on Statistical methods for computational experiments in visual processing and computer vision, Whistler, 2002. } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{ksvm}}} \examples{ ## estimate good sigma values for promotergene data(promotergene) srange <- sigest(Class~.,data = promotergene) srange s <- srange[2] s ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma = s),C=50,cross=3) gene ## predict gene type on the test set promoter <- predict(gene,genetest[,-1]) ## Check results table(promoter,genetest[,1]) } \keyword{classif} \keyword{regression} kernlab/man/plot.Rd0000644000176000001440000000216511304023134013735 0ustar ripleyusers\name{plot} \alias{plot.ksvm} \alias{plot,ksvm,missing-method} \alias{plot,ksvm-method} \title{plot method for support vector object} \description{Plot a binary classification support vector machine object. The \code{plot} function returns a contour plot of the decision values. } \usage{ \S4method{plot}{ksvm}(object, data=NULL, grid = 50, slice = list()) } \arguments{ \item{object}{a \code{ksvm} classification object created by the \code{ksvm} function} \item{data}{a data frame or matrix containing data to be plotted} \item{grid}{granularity for the contour plot.} \item{slice}{a list of named numeric values for the dimensions held constant (only needed if more than two variables are used). Dimensions not specified are fixed at 0. } } \seealso{\code{\link{ksvm}}} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \keyword{classif} \examples{ ## Demo of the plot function x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) } kernlab/man/specc-class.Rd0000644000176000001440000000315311304023134015155 0ustar ripleyusers\name{specc-class} \docType{class} \alias{specc-class} \alias{centers} \alias{size} \alias{withinss} \alias{centers,specc-method} \alias{withinss,specc-method} \alias{size,specc-method} \alias{kernelf,specc-method} \title{Class "specc"} \description{ The Spectral Clustering Class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("specc", ...)} or by calling the function \code{specc}.
} \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"vector"} containing the cluster assignments} \item{\code{centers}:}{Object of class \code{"matrix"} containing the cluster centers} \item{\code{size}:}{Object of class \code{"vector"} containing the number of points in each cluster} \item{\code{withinss}:}{Object of class \code{"vector"} containing the within-cluster sum of squares for each cluster} \item{\code{kernelf}}{Object of class \code{kernel} containing the used kernel function.} } } \section{Methods}{ \describe{ \item{centers}{\code{signature(object = "specc")}: returns the cluster centers} \item{withinss}{\code{signature(object = "specc")}: returns the within-cluster sum of squares for each cluster} \item{size}{\code{signature(object = "specc")}: returns the number of points in each cluster } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{specc}}, \code{\link{kpca-class}} } \examples{ ## Cluster the spirals data set. data(spirals) sc <- specc(spirals, centers=2) centers(sc) size(sc) } \keyword{classes} kernlab/man/gausspr.Rd0000644000176000001440000001661412560371302014455 0ustar ripleyusers\name{gausspr} \alias{gausspr} \alias{gausspr,formula-method} \alias{gausspr,vector-method} \alias{gausspr,matrix-method} \alias{coef,gausspr-method} \alias{show,gausspr-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gaussian processes for regression and classification} \description{ \code{gausspr} is an implementation of Gaussian processes for classification and regression. } \usage{ \S4method{gausspr}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{gausspr}{vector}(x,...) \S4method{gausspr}{matrix}(x, y, scaled = TRUE, type= NULL, kernel="rbfdot", kpar="automatic", var=1, variance.model = FALSE, tol=0.0005, cross=0, fit=TRUE, ... , subset, na.action = na.omit) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{a symbolic description of the model to be fit or a matrix or vector when a formula interface is not used. When not using a formula x is a matrix or vector containing the variables in the model} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `gausspr' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{type}{Type of problem. Either "classification" or "regression". Depending on whether \code{y} is a factor or not, the default setting for \code{type} is \code{classification} or \code{regression}, respectively, but can be overwritten by setting an explicit value.\cr} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. 
    kernlab provides the most popular kernel functions which can be
    used by setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
    }
    The kernel parameter can also be set to a user defined function of
    class kernel by passing the function name as an argument.}
  \item{kpar}{the list of hyper-parameters (kernel parameters). This is
    a list which contains the parameters to be used with the kernel
    function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis
        kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel
        "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel
        function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel
        "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
    }
    Hyper-parameters for user defined kernels can be passed through the
    kpar parameter as well.}
  \item{var}{the initial noise variance (only for regression)
    (default: 1)}
  \item{variance.model}{build model for variance or standard deviation
    estimation (only for regression) (default: FALSE)}
  \item{tol}{tolerance of termination criterion (default: 0.0005)}
  \item{fit}{indicates whether the fitted values should be computed and
    included in the model or not (default: TRUE)}
  \item{cross}{if an integer value k>0 is specified, a k-fold cross
    validation on the training data is performed to assess the quality
    of the model; for regression the Mean Squared Error is reported}
  \item{subset}{An index vector specifying the cases to be used in the
    training sample. (NOTE: If given, this argument must be named.)}
  \item{na.action}{A function to specify the action to be taken if
    \code{NA}s are found. The default action is \code{na.omit}, which
    leads to rejection of cases with missing values on any required
    variable. An alternative is \code{na.fail}, which causes an error
    if \code{NA} cases are found. (NOTE: If given, this argument must
    be named.)}
  \item{\dots}{additional parameters}
}
\details{
  A Gaussian process is specified by a mean and a covariance function.
  The mean is a function of \eqn{x} (which is often the zero function),
  and the covariance is a function \eqn{C(x,x')} which expresses the
  expected covariance between the values of the function \eqn{y} at the
  points \eqn{x} and \eqn{x'}. The actual function \eqn{y(x)} in any
  data modeling problem is assumed to be a single sample from this
  Gaussian distribution. The Laplace approximation is used for
  parameter estimation in Gaussian processes for classification.\cr
  The predict function can return class probabilities for
  classification problems by setting the \code{type} parameter to
  "probabilities". For regression, setting the \code{type} parameter to
  "variance" or "sdeviation" returns the estimated variance or standard
  deviation at each predicted point.
}
\value{
  An S4 object of class "gausspr" containing the fitted model along
  with information.
  Accessor functions can be used to access the slots of the object
  which include:
  \item{alpha}{The resulting model parameters}
  \item{error}{Training error (if fit == TRUE)}
}
\references{
  C. K. I. Williams and D. Barber\cr
  Bayesian classification with Gaussian processes.\cr
  IEEE Transactions on Pattern Analysis and Machine Intelligence,
  20(12):1342-1351, 1998\cr
  \url{http://www.dai.ed.ac.uk/homes/ckiw/postscript/pami_final.ps.gz}
}
\author{Alexandros Karatzoglou\cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{predict.gausspr}}, \code{\link{rvm}},
  \code{\link{ksvm}}, \code{\link{gausspr-class}}, \code{\link{lssvm}}}
\examples{
# train model
data(iris)
test <- gausspr(Species~.,data=iris,var=2)
test
alpha(test)

# predict on the training set
predict(test,iris[,-5])
# class probabilities
predict(test, iris[,-5], type="probabilities")

# create regression data
x <- seq(-20,20,0.1)
y <- sin(x)/x + rnorm(401,sd=0.03)

# regression with Gaussian processes
foo <- gausspr(x, y)
foo

# predict and plot
ytest <- predict(foo, x)
plot(x, y, type = "l")
lines(x, ytest, col="red")

# predict with variance estimation
x <- c(-4, -3, -2, -1, 0, 0.5, 1, 2)
y <- c(-2, 0, -0.5, 1, 2, 1, 0, -1)
plot(x,y)
foo2 <- gausspr(x, y, variance.model = TRUE)
xtest <- seq(-4,2,0.2)
lines(xtest, predict(foo2, xtest))
lines(xtest,
      predict(foo2, xtest) + 2*predict(foo2, xtest, type="sdeviation"),
      col="red")
lines(xtest,
      predict(foo2, xtest) - 2*predict(foo2, xtest, type="sdeviation"),
      col="red")
}
\keyword{classif}
\keyword{regression}
\keyword{nonlinear}
\keyword{methods}
kernlab/man/income.Rd0000644000176000001440000000370611304023134014231 0ustar ripleyusers\name{income}
\alias{income}
\title{Income Data}
\description{
  Customer income data from a marketing survey.
}
\usage{data(income)}
\format{
  A data frame with 14 categorical variables (8993 observations).

  Explanation of the variable names:
  \tabular{rllll}{
    \tab 1 \tab \code{INCOME} \tab annual income of household \tab \cr
    \tab   \tab               \tab (personal income if single) \tab ordinal\cr
    \tab 2 \tab \code{SEX} \tab sex \tab nominal\cr
    \tab 3 \tab \code{MARITAL.STATUS} \tab marital status \tab nominal\cr
    \tab 4 \tab \code{AGE} \tab age \tab ordinal\cr
    \tab 5 \tab \code{EDUCATION} \tab educational grade \tab ordinal\cr
    \tab 6 \tab \code{OCCUPATION} \tab type of work \tab nominal\cr
    \tab 7 \tab \code{AREA} \tab how long the interviewed person has lived\tab \cr
    \tab   \tab             \tab in the San Francisco/Oakland/San Jose area \tab ordinal\cr
    \tab 8 \tab \code{DUAL.INCOMES} \tab dual incomes (if married) \tab nominal\cr
    \tab 9 \tab \code{HOUSEHOLD.SIZE} \tab persons living in the household \tab ordinal\cr
    \tab 10 \tab \code{UNDER18} \tab persons in household under 18 \tab ordinal\cr
    \tab 11 \tab \code{HOUSEHOLDER} \tab householder status \tab nominal\cr
    \tab 12 \tab \code{HOME.TYPE} \tab type of home \tab nominal\cr
    \tab 13 \tab \code{ETHNIC.CLASS} \tab ethnic classification \tab nominal\cr
    \tab 14 \tab \code{LANGUAGE} \tab language most often spoken at home \tab nominal\cr
  }
}
\details{
  A total of N=9409 questionnaires containing 502 questions were filled
  out by shopping mall customers in the San Francisco Bay area. The
  dataset is an extract from this survey. It consists of 14 demographic
  attributes and is a mixture of nominal and ordinal variables with a
  lot of missing data. The goal is to predict the annual income of a
  household from the other 13 demographic attributes.
}
\source{
  Impact Resources, Inc., Columbus, OH (1987).
}
\keyword{datasets}
kernlab/man/specc.Rd0000644000176000001440000001420712560414652014070 0ustar ripleyusers\name{specc}
\alias{specc}
\alias{specc,matrix-method}
\alias{specc,formula-method}
\alias{specc,list-method}
\alias{specc,kernelMatrix-method}
\alias{show,specc-method}
\title{Spectral Clustering}
\description{
  A spectral clustering algorithm. Clustering is performed by embedding
  the data into the subspace of the eigenvectors of an affinity matrix.
}
\usage{
\S4method{specc}{formula}(x, data = NULL, na.action = na.omit, ...)

\S4method{specc}{matrix}(x, centers, kernel = "rbfdot", kpar = "automatic",
      nystrom.red = FALSE, nystrom.sample = dim(x)[1]/6, iterations = 200,
      mod.sample = 0.75, na.action = na.omit, ...)

\S4method{specc}{kernelMatrix}(x, centers, nystrom.red = FALSE, iterations = 200, ...)

\S4method{specc}{list}(x, centers, kernel = "stringdot",
      kpar = list(length=4, lambda=0.5), nystrom.red = FALSE,
      nystrom.sample = length(x)/6, iterations = 200, mod.sample = 0.75,
      na.action = na.omit, ...)
}
\arguments{
  \item{x}{the matrix of data to be clustered, or a symbolic
    description of the model to be fit, or a kernel matrix of class
    \code{kernelMatrix}, or a list of character vectors.}
  \item{data}{an optional data frame containing the variables in the
    model. By default the variables are taken from the environment from
    which `specc' is called.}
  \item{centers}{Either the number of clusters or a set of initial
    cluster centers. If the former, a random set of rows of the
    eigenvector matrix is chosen as the initial centers.}
  \item{kernel}{the kernel function used in computing the affinity
    matrix. This parameter can be set to any function, of class kernel,
    which computes a dot product between two vector arguments. kernlab
    provides the most popular kernel functions which can be used by
    setting the kernel parameter to the following strings:
    \itemize{
      \item \code{rbfdot} Radial Basis kernel function "Gaussian"
      \item \code{polydot} Polynomial kernel function
      \item \code{vanilladot} Linear kernel function
      \item \code{tanhdot} Hyperbolic tangent kernel function
      \item \code{laplacedot} Laplacian kernel function
      \item \code{besseldot} Bessel kernel function
      \item \code{anovadot} ANOVA RBF kernel function
      \item \code{splinedot} Spline kernel
      \item \code{stringdot} String kernel
    }
    The kernel parameter can also be set to a user defined function of
    class kernel by passing the function name as an argument.}
  \item{kpar}{a character string or the list of hyper-parameters
    (kernel parameters). The default character string
    \code{"automatic"} uses a heuristic to determine a suitable value
    for the width parameter of the RBF kernel. The second option
    \code{"local"} (local scaling) uses a more advanced heuristic and
    sets a width parameter for every point in the data set. This is
    particularly useful when the data incorporates multiple scales.
    A list can also be used, containing the parameters to be used with
    the kernel function. Valid parameters for existing kernels are:
    \itemize{
      \item \code{sigma} inverse kernel width for the Radial Basis
        kernel function "rbfdot" and the Laplacian kernel "laplacedot".
      \item \code{degree, scale, offset} for the Polynomial kernel
        "polydot"
      \item \code{scale, offset} for the Hyperbolic tangent kernel
        function "tanhdot"
      \item \code{sigma, order, degree} for the Bessel kernel
        "besseldot".
      \item \code{sigma, degree} for the ANOVA kernel "anovadot".
      \item \code{length, lambda, normalized} for the "stringdot"
        kernel, where length is the length of the strings considered,
        lambda the decay factor, and normalized a logical parameter
        determining if the kernel evaluations should be normalized.
    }
    Hyper-parameters for user defined kernels can be passed through the
    kpar parameter as well.}
  \item{nystrom.red}{use the Nystrom method to calculate the
    eigenvectors. When \code{TRUE} a sample of the data set is used to
    calculate the eigenvalues, thus only an \eqn{n \times m} matrix,
    where \eqn{n} is the sample size, is stored in memory
    (default: \code{FALSE})}
  \item{nystrom.sample}{number of data points to use for estimating the
    eigenvalues when using the Nystrom method (default: dim(x)[1]/6)}
  \item{mod.sample}{proportion of data to use when estimating sigma
    (default: 0.75)}
  \item{iterations}{the maximum number of iterations allowed.}
  \item{na.action}{the action to perform on NA}
  \item{\dots}{additional parameters}
}
\details{
  Spectral clustering works by embedding the data points of the
  partitioning problem into the subspace of the \eqn{k} largest
  eigenvectors of a normalized affinity/kernel matrix. Using a simple
  clustering method like \code{kmeans} on the embedded points usually
  leads to good performance. It can be shown that spectral clustering
  methods boil down to graph partitioning.\cr
  The data can be passed to the \code{specc} function in a
  \code{matrix} or a \code{data.frame}; in addition, \code{specc} also
  supports input in the form of a kernel matrix of class
  \code{kernelMatrix} or as a list of character vectors where a string
  kernel has to be used.}
\value{
  An S4 object of class \code{specc} which extends the class
  \code{vector} containing integers indicating the cluster to which
  each point is allocated. The following slots contain useful
  information:
  \item{centers}{A matrix of cluster centers.}
  \item{size}{The number of points in each cluster}
  \item{withinss}{The within-cluster sum of squares for each cluster}
  \item{kernelf}{The kernel function used}
}
\references{
  Andrew Y. Ng, Michael I. Jordan, Yair Weiss\cr
  \emph{On Spectral Clustering: Analysis and an Algorithm}\cr
  Neural Information Processing Symposium 2001\cr
  \url{http://papers.nips.cc/paper/2092-on-spectral-clustering-analysis-and-an-algorithm.pdf}
}
\author{Alexandros Karatzoglou\cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\seealso{\code{\link{kkmeans}}, \code{\link{kpca}}, \code{\link{kcca}}}
\examples{
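## The kernelMatrix method documented above can also be used with a
## precomputed kernel matrix; a minimal sketch (the RBF width
## sigma = 100 is an illustrative, untuned choice):
data(spirals)
K <- kernelMatrix(rbfdot(sigma = 100), spirals)
sck <- specc(K, centers = 2)

## Cluster the spirals data set.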
data(spirals)
sc <- specc(spirals, centers=2)
sc
centers(sc)
size(sc)
withinss(sc)
plot(spirals, col=sc)
}
\keyword{cluster}
kernlab/man/predict.ksvm.Rd0000644000176000001440000000511412560430652015377 0ustar ripleyusers\name{predict.ksvm}
\alias{predict.ksvm}
\alias{predict,ksvm-method}
\title{predict method for support vector object}
\description{Prediction of test data using support vector machines}
\usage{
\S4method{predict}{ksvm}(object, newdata, type = "response", coupler = "minpair")
}
\arguments{
  \item{object}{an S4 object of class \code{ksvm} created by the
    \code{ksvm} function}
  \item{newdata}{a data frame or matrix containing new data}
  \item{type}{one of \code{response}, \code{probabilities},
    \code{votes}, \code{decision} indicating the type of output:
    predicted values, matrix of class probabilities, matrix of vote
    counts, or matrix of decision values.}
  \item{coupler}{Coupling method used in the multiclass case, can be
    one of \code{minpair} or \code{pkpd} (see reference for more
    details).}
}
\value{
  If \code{type(object)} is \code{C-svc}, \code{nu-svc}, \code{C-bsvc}
  or \code{spoc-svc} the vector returned depends on the argument
  \code{type}:
  \item{response}{predicted classes (the classes with majority vote).}
  \item{probabilities}{matrix of class probabilities (one column for
    each class and one row for each input).}
  \item{votes}{matrix of vote counts (one column for each class and one
    row for each new input)}
  If \code{type(object)} is \code{eps-svr}, \code{eps-bsvr} or
  \code{nu-svr} a vector of predicted values is returned. If
  \code{type(object)} is \code{one-classification} a vector of logical
  values is returned.
}
\references{
  \itemize{
    \item T.F. Wu, C.J. Lin, R.C. Weng.\cr
      \emph{Probability estimates for Multi-class Classification by
        Pairwise Coupling}\cr
      \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf}
    \item H.T. Lin, C.J. Lin, R.C. Weng\cr
      \emph{A note on Platt's probabilistic outputs for support vector
        machines}\cr
      \url{http://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}
  }
}
\author{Alexandros Karatzoglou\cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\keyword{methods}
\keyword{regression}
\keyword{classif}
\examples{
## example using the promotergene data set
data(promotergene)

## create test and training set
ind <- sample(1:dim(promotergene)[1],20)
genetrain <- promotergene[-ind, ]
genetest <- promotergene[ind, ]

## train a support vector machine
gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",
             kpar=list(sigma=0.015),C=70,cross=4,prob.model=TRUE)
gene

## predict gene type probabilities on the test set
genetype <- predict(gene,genetest,type="probabilities")
genetype
}
kernlab/man/predict.kqr.Rd0000644000176000001440000000214112117365174015215 0ustar ripleyusers\name{predict.kqr}
\alias{predict.kqr}
\alias{predict,kqr-method}
\title{Predict method for kernel Quantile Regression object}
\description{Prediction of test data for kernel quantile regression}
\usage{
\S4method{predict}{kqr}(object, newdata)
}
\arguments{
  \item{object}{an S4 object of class \code{kqr} created by the
    \code{kqr} function}
  \item{newdata}{a data frame, matrix, or kernelMatrix containing new
    data}
}
\value{The value of the quantile given by the computed \code{kqr} model
  in a vector of length equal to the number of rows of \code{newdata}.
}
\author{Alexandros Karatzoglou\cr
  \email{alexandros.karatzoglou@ci.tuwien.ac.at}}
\keyword{methods}
\keyword{regression}
\examples{
# create data
x <- sort(runif(300))
y <- sin(pi*x) + rnorm(300, 0, sd = exp(sin(2*pi*x)))

# first calculate the median
qrm <- kqr(x, y, tau = 0.5, C = 0.15)

# predict and plot
plot(x, y)
ytest <- predict(qrm, x)
lines(x, ytest, col="blue")

# calculate 0.9 quantile
qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot",
           kpar = list(sigma = 10), C = 0.15)
ytest <- predict(qrm, x)
lines(x, ytest, col="red")
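# a quick illustrative check (a sketch, not part of the model fit):
# with tau = 0.9, roughly 90 percent of the responses should lie at
# or below the fitted quantile curve
mean(y <= ytest)
}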